W0828 05:00:36.205762 1882310 torch/_inductor/utils.py:1406] [0/0] DeviceCopy in input program
W0828 05:00:36.269448 1882310 torch/_inductor/utils.py:1406] [0/0] DeviceCopy in input program
W0828 05:00:36.302968 1882310 torch/_inductor/utils.py:1406] [0/0] DeviceCopy in input program
V0828 05:01:23.011752 1882310 torch/_dynamo/guards.py:2297] [0/0] [__guards] GUARDS:
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] TREE_GUARD_MANAGER:
+- RootGuardManager
| +- DEFAULT_DEVICE: utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:471 in init_ambient_guards
| +- GLOBAL_STATE: ___check_global_state()
| +- TORCH_FUNCTION_MODE_STACK: ___check_torch_function_mode_stack()
| +- GuardManager: source=L['self'], accessed_by=DictGetItemGuardAccessor(self)
| | +- TYPE_MATCH: ___check_type_id(L['self'], 245047360) # scale_lora_layers(self, lora_scale) # diffusers/src/diffusers/models/transformers/transformer_flux.py:436 in forward
| | +- GuardManager: source=L['self']._buffers, accessed_by=GetAttrGuardAccessor(_buffers)
| | | +- DICT_LENGTH: not L['self']._buffers # _buffers = self.__dict__["_buffers"] # nn/modules/module.py:1908 in __getattr__
| | +- GuardManager: source=L['self']._modules, accessed_by=GetAttrGuardAccessor(_modules)
| | | +- DICT_LENGTH: len(L['self']._modules) == 8 # modules = self.__dict__["_modules"] # nn/modules/module.py:1912 in __getattr__
| | | +- GuardManager: source=L['self']._modules['pos_embed'], accessed_by=DictGetItemGuardAccessor(pos_embed)
| | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['pos_embed'], 99356304) # if name in modules: # nn/modules/module.py:1913 in __getattr__
| | | | +- GuardManager: source=L['self']._modules['pos_embed'].__dict__, accessed_by=GetGenericDictGuardAccessor
| | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['pos_embed'].__dict__) # image_rotary_emb = self.pos_embed(ids) # diffusers/src/diffusers/models/transformers/transformer_flux.py:469 in forward
| | | | | +- GuardManager: source=L['self']._modules['pos_embed'].axes_dim, accessed_by=DictGetItemGuardAccessor(axes_dim)
| | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['pos_embed'].axes_dim, 7569792) # self.axes_dim[i], pos[:, i], repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype # diffusers/src/diffusers/models/embeddings.py:630 in forward
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['pos_embed'].axes_dim) == 3 # self.axes_dim[i], pos[:, i], repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype # diffusers/src/diffusers/models/embeddings.py:630 in forward
| | | | | | +- GuardManager: source=L['self']._modules['pos_embed'].axes_dim[0], accessed_by=TupleGetItemGuardAccessor(0)
| | | | | | | +- EQUALS_MATCH: L['self']._modules['pos_embed'].axes_dim[0] == 16 # cos, sin = get_1d_rotary_pos_embed( # diffusers/src/diffusers/models/embeddings.py:629 in forward
| | | | | | +- GuardManager: source=L['self']._modules['pos_embed'].axes_dim[1], accessed_by=TupleGetItemGuardAccessor(1)
| | | | | | | +- EQUALS_MATCH: L['self']._modules['pos_embed'].axes_dim[1] == 56 # cos, sin = get_1d_rotary_pos_embed( # diffusers/src/diffusers/models/embeddings.py:629 in forward
| | | | | | +- GuardManager: source=L['self']._modules['pos_embed'].axes_dim[2], accessed_by=TupleGetItemGuardAccessor(2)
| | | | | | | +- EQUALS_MATCH: L['self']._modules['pos_embed'].axes_dim[2] == 56 # cos, sin = get_1d_rotary_pos_embed( # diffusers/src/diffusers/models/embeddings.py:629 in forward
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['pos_embed']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks)
| | | | | +- GuardManager: source=L['self']._modules['pos_embed']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks)
| | | | | | +- DICT_LENGTH: not L['self']._modules['pos_embed']._backward_hooks # image_rotary_emb = self.pos_embed(ids) # diffusers/src/diffusers/models/transformers/transformer_flux.py:469 in forward
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['pos_embed']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks)
| | | | | +- GuardManager: source=L['self']._modules['pos_embed']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks)
| | | | | | +- DICT_LENGTH: not L['self']._modules['pos_embed']._backward_pre_hooks # image_rotary_emb = self.pos_embed(ids) # diffusers/src/diffusers/models/transformers/transformer_flux.py:469 in forward
| | | +- GuardManager: source=L['self']._modules['time_text_embed'], accessed_by=DictGetItemGuardAccessor(time_text_embed)
| | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed'], 99372448) # if name in modules: # nn/modules/module.py:1913 in __getattr__
| | | | +- GuardManager: source=L['self']._modules['time_text_embed'].__dict__, accessed_by=GetGenericDictGuardAccessor
| | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed'].__dict__) # else self.time_text_embed(timestep, guidance, pooled_projections) # diffusers/src/diffusers/models/transformers/transformer_flux.py:452 in forward
| | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules, accessed_by=DictGetItemGuardAccessor(_modules)
| | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules) == 4 # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward
| | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'], accessed_by=DictGetItemGuardAccessor(time_proj)
| | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['time_proj'], 99358192) # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward
| | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor
| | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['time_proj'].__dict__) # guidance_proj = self.time_proj(guidance) # diffusers/src/diffusers/models/embeddings.py:994 in forward
| | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['time_proj'].__dict__) # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward
| | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].scale, accessed_by=DictGetItemGuardAccessor(scale)
| | | | | | | | | +- EQUALS_MATCH: L['self']._modules['time_text_embed']._modules['time_proj'].scale == 1 # emb = scale * emb # diffusers/src/diffusers/models/embeddings.py:66 in get_timestep_embedding
| | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].num_channels, accessed_by=DictGetItemGuardAccessor(num_channels)
| | | | | | | | | +- EQUALS_MATCH: L['self']._modules['time_text_embed']._modules['time_proj'].num_channels == 256 # half_dim = embedding_dim // 2 # diffusers/src/diffusers/models/embeddings.py:56 in get_timestep_embedding
| | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks)
| | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks)
| | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['time_proj']._backward_hooks # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward
| | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].flip_sin_to_cos, accessed_by=DictGetItemGuardAccessor(flip_sin_to_cos)
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['time_proj'].flip_sin_to_cos, 7629952) # if flip_sin_to_cos: # diffusers/src/diffusers/models/embeddings.py:72 in get_timestep_embedding
| | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks)
| | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks)
| | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['time_proj']._backward_pre_hooks # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward
| | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].downscale_freq_shift, accessed_by=DictGetItemGuardAccessor(downscale_freq_shift)
| | | | | | | | | +- EQUALS_MATCH: L['self']._modules['time_text_embed']._modules['time_proj'].downscale_freq_shift == 0 # exponent = exponent / (half_dim - downscale_freq_shift) # diffusers/src/diffusers/models/embeddings.py:60 in get_timestep_embedding
| | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'], accessed_by=DictGetItemGuardAccessor(timestep_embedder)
| | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder'], 99357248) # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward
| | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor
| | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder'].__dict__) # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward
| | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules, accessed_by=DictGetItemGuardAccessor(_modules)
| | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules) == 3 # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward
| | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'], accessed_by=DictGetItemGuardAccessor(linear_1)
| | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'], 97167728) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward
| | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'].__dict__, accessed_by=GetGenericDictGuardAccessor
| | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'].__dict__) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters)
| | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight)
| | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 256], stride=[256, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward
| | | | | | | | | | | | | +- NO_TENSOR_ALIASING: check_no_aliasing(L['img_ids'], L['txt_ids'], L['guidance'], L['timestep'], L['hidden_states'], L['pooled_projections'], L['encoder_hidden_states'], L['self']._modules['proj_out']._parameters['bias'], L['self']._modules['proj_out']._parameters['weight'], L['self']._modules['x_embedder']._parameters['bias'], L['self']._modules['x_embedder']._parameters['weight'], L['self']._modules['context_embedder']._parameters['bias'], L['self']._modules['context_embedder']._parameters['weight'], L['self']._modules['norm_out']._modules['linear']._parameters['bias'], L['self']._modules['norm_out']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters['bias'],
L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters['weight'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['bias'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters['weight'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['weight'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['weight'], L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['bias'], L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['bias'], 
L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['bias'], L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['weight'], L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['weight'], L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['weight'], L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight']) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'], accessed_by=DictGetItemGuardAccessor(act) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'], 96881248) # if self.act is not None: # diffusers/src/diffusers/models/embeddings.py:677 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- OBJECT_ALIASING: L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'] is L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['act'] # if self.act is not None: # diffusers/src/diffusers/models/embeddings.py:677 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].__dict__) # sample = self.act(sample) # diffusers/src/diffusers/models/embeddings.py:678 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'], accessed_by=DictGetItemGuardAccessor(linear_2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'], 97167728) # sample = self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'].__dict__) # sample = self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].post_act, accessed_by=DictGetItemGuardAccessor(post_act) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['timestep_embedder'].post_act, 7580768) # if self.post_act is not None: # diffusers/src/diffusers/models/embeddings.py:682 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['timestep_embedder']._parameters # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_hooks # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_pre_hooks # timesteps_emb = 
self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward.__defaults__[0], 7580768) # if condition is not None: # diffusers/src/diffusers/models/embeddings.py:673 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'], accessed_by=DictGetItemGuardAccessor(guidance_embedder) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['guidance_embedder'], 99357248) # guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['guidance_embedder'].__dict__) # guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules) == 3 # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'], accessed_by=DictGetItemGuardAccessor(linear_1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'], 97167728) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'].__dict__) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 256], stride=[256, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['act'], accessed_by=DictGetItemGuardAccessor(act) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
+- OBJECT_ALIASING: L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'] is L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['act'] # if self.act is not None: # diffusers/src/diffusers/models/embeddings.py:677 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'], accessed_by=DictGetItemGuardAccessor(linear_2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'], 97167728) # sample = self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'].__dict__) # sample = self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].post_act, accessed_by=DictGetItemGuardAccessor(post_act) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['guidance_embedder'].post_act, 7580768) # if self.post_act is not None: # diffusers/src/diffusers/models/embeddings.py:682 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['guidance_embedder']._parameters # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_hooks # guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_pre_hooks # guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: 
source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward.__defaults__[0], 7580768) # if condition is not None: # diffusers/src/diffusers/models/embeddings.py:673 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder'], accessed_by=DictGetItemGuardAccessor(text_embedder) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder'], 100455248) # pooled_projections = self.text_embedder(pooled_projection) # diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder'].__dict__) # pooled_projections = self.text_embedder(pooled_projection) # diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['text_embedder']._modules) == 3 # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'], accessed_by=DictGetItemGuardAccessor(linear_1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'], 97167728) # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'].__dict__) # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 768], stride=[768, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'], accessed_by=DictGetItemGuardAccessor(act_1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'], 96881248) # hidden_states = self.act_1(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1443 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].__dict__) # hidden_states = self.act_1(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1443 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'], accessed_by=DictGetItemGuardAccessor(linear_2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'], 97167728) # hidden_states = self.linear_2(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1444 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'].__dict__) # hidden_states = self.linear_2(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1444 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['text_embedder']._parameters # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['text_embedder']._backward_hooks # pooled_projections = self.text_embedder(pooled_projection) # diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['text_embedder']._backward_pre_hooks # pooled_projections = self.text_embedder(pooled_projection) # diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._parameters # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._backward_hooks # else self.time_text_embed(timestep, guidance, pooled_projections) # diffusers/src/diffusers/models/transformers/transformer_flux.py:452 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._backward_pre_hooks # else self.time_text_embed(timestep, guidance, pooled_projections) # diffusers/src/diffusers/models/transformers/transformer_flux.py:452 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=L['self']._modules['context_embedder'], accessed_by=DictGetItemGuardAccessor(context_embedder) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['context_embedder'], 97167728) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=L['self']._modules['context_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['context_embedder'].__dict__) # encoder_hidden_states = self.context_embedder(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:454 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=L['self']._modules['context_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['context_embedder']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['context_embedder']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['context_embedder']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 4096], stride=[4096, 1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['context_embedder']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['context_embedder']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=L['self']._modules['x_embedder'], accessed_by=DictGetItemGuardAccessor(x_embedder) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['x_embedder'], 97167728) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=L['self']._modules['x_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['x_embedder'].__dict__) # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=L['self']._modules['x_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['x_embedder']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['x_embedder']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['x_embedder']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 64], stride=[64, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['x_embedder']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['x_embedder']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], 
stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=L['self']._modules['transformer_blocks'], accessed_by=DictGetItemGuardAccessor(transformer_blocks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks'], 96863792) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=L['self']._modules['transformer_blocks'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'], 
accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | 
| | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- 
EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].processor, 139846062622704) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].eps, 
accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[0] V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = 
self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'] V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # 
diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | 
| | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].heads, 
accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # 
nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | 
| | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | 
| | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | 
| | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].processor, 139846062624576) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
+- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # 
diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- 
ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2'].__dict__) # encoder_hidden_states, 
hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, 
c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | 
| +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], 
accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in 
forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 
in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'], 99397456) # if 
attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = 
attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].processor, 139846063044304) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net'], 96863792) # for module in self.net: # 
diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, 
self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, 
self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].__dict__) # 
hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | 
| | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=3 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[3] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[3] == '3' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, 
gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = 
self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # 
diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].__dict__) # 
encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | 
| | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].processor, 139846063046176) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._parameters # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].eps == 1e-06 # return 
F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return 
iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in 
__iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, 
device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape) == 1 # return 
F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | 
| | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = 
module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=4 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[4] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[4] == '4' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # 
diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 
in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].processor, 139846066996704) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | 
| | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[2] 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._parameters # 
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=5 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[5] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[5] == '5' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # 
diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- 
TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'], 97167728) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, 
self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters) == 1 # if 
self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], 97167728) # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'], 
accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in 
__call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].processor, 239395136) # attn_parameters = 
set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].processor, 139846066998768) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # 
hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
| | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in 
forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 
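Taken together, the guards for norm2 and ff above (and the ff_context guards that continue below, which mirror them exactly) describe a standard pre-norm MLP for block 5: LayerNorm over 3072 features with eps 1e-6 and weight/bias fixed to the same sentinel object (i.e. no affine parameters), then a 3072 -> 12288 linear projection with tanh-approximate GELU, Dropout with p == 0.0, and a 12288 -> 3072 output linear, all bfloat16 on CUDA. A minimal stand-in with the same shapes and activations (assumed names; a sketch, not the diffusers FeedForward class itself):

```python
import torch
from torch import nn

class GELUProj(nn.Module):
    # Matches the guarded net.0 entry: proj Linear 3072 -> 12288, approximate == 'tanh'.
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.gelu(self.proj(x), approximate="tanh")

def feed_forward_sketch(dim: int = 3072, mult: int = 4) -> nn.Sequential:
    # Same three-entry layout the guards pin down for ff / ff_context:
    # GELU(tanh) projection, Dropout(p=0.0), Linear back to `dim`.
    inner = dim * mult  # 12288 in the guarded model
    return nn.Sequential(
        GELUProj(dim, inner),
        nn.Dropout(p=0.0),
        nn.Linear(inner, dim),
    )

# Usage sketch (bfloat16 weights on CUDA, as in the TENSOR_MATCH guards):
# ff = feed_forward_sketch().to(device="cuda", dtype=torch.bfloat16)
# out = ff(torch.randn(1, 512, 3072, device="cuda", dtype=torch.bfloat16))
```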
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | 
| | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=6 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[6] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[6] == '6' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], 
accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].__dict__) # 
encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'], 
accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].processor, 139846062908992) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff'], 
239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- 
ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
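Reading the ff guards above back into module form: net['0'] is a projection-plus-GELU unit (proj weight [12288, 3072], approximate == 'tanh'), net['1'] is a Dropout with p == 0.0, and net['2'] is a Linear back down to 3072, all bfloat16 on CUDA device 0. The following is a minimal stand-in assembled from those guarded values, not diffusers' own FeedForward/GELU classes; the name GELUProj is made up for illustration.

    import torch
    from torch import nn

    class GELUProj(nn.Module):
        # Stand-in for net['0'] as described by the guards:
        # a Linear projection followed by GELU(approximate="tanh").
        def __init__(self, dim_in: int, dim_out: int, approximate: str = "tanh"):
            super().__init__()
            self.proj = nn.Linear(dim_in, dim_out)   # weight [12288, 3072], bias [12288]
            self.approximate = approximate           # guarded via EQUALS_MATCH == 'tanh'

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return nn.functional.gelu(self.proj(x), approximate=self.approximate)

    # The full ff stack the guard tree walks: net['0'], net['1'] (Dropout p=0.0),
    # net['2'] (Linear 12288 -> 3072), matching the TENSOR_MATCH sizes and strides above.
    ff = nn.Sequential(
        GELUProj(3072, 12288),
        nn.Dropout(p=0.0),
        nn.Linear(12288, 3072),
    )

    if torch.cuda.is_available():
        ff = ff.to(device="cuda", dtype=torch.bfloat16)  # dtype/device recorded by check_tensor

    x = torch.randn(1, 16, 3072, device=ff[2].weight.device, dtype=ff[2].weight.dtype)
    print(ff(x).shape)  # torch.Size([1, 16, 3072])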
L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], 
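The EQUALS_MATCH / ID_MATCH guards on plain Python attributes (p, inplace, training, approximate) are what make the cached graph safe to reuse: if any of them changes between calls, the guard fails and the frame is recompiled. A small demonstration of that mechanism, assuming default torch.compile settings; the toy module here is illustrative only.

    import torch
    from torch import nn

    torch._logging.set_logs(recompiles=True)

    mlp = nn.Sequential(nn.Linear(8, 8), nn.Dropout(p=0.0)).eval()
    compiled_mlp = torch.compile(mlp)

    x = torch.randn(2, 8)
    compiled_mlp(x)   # compile #1: guards record dropout.p == 0.0 and training is False
    mlp[1].p = 0.1    # mutate a guarded attribute on the original module
    compiled_mlp(x)   # the EQUALS_MATCH guard on .p fails, so Dynamo recompiles this frame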
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=7 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[7] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[7] == '7' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
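TENSOR_MATCH (check_tensor) pins each parameter's and input's class, dispatch keys, dtype, device, requires_grad, sizes, and strides. On the first compile the input sizes are part of that contract, so a different batch size fails the guard and, under the default automatic dynamic-shapes behavior, typically costs one recompile with a dynamic dimension. A sketch under those default settings:

    import torch
    from torch import nn

    torch._logging.set_logs(guards=True, recompiles=True)

    lin = torch.compile(nn.Linear(3072, 18432))

    lin(torch.randn(1, 3072))   # compile #1: TENSOR_MATCH records size=[1, 3072] for the input
    lin(torch.randn(4, 3072))   # size guard fails -> recompile, usually with a dynamic batch dim
    lin(torch.randn(7, 3072))   # now covered by the dynamic-shape guards, no further recompile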
L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # 
diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 
in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].processor, 139846062911056) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | 
| | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[2] 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._parameters # 
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=8 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[8] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[8] == '8' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # 
diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- 
TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'], 97167728) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, 
self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters) == 1 # if 
self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], 97167728) # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'], 
accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in 
__call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].processor, 239395136) # attn_parameters = 
set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].processor, 139846069991072) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # 
hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
| | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in 
forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | 
| | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=9 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[9] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[9] == '9' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], 
accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].__dict__) # 
encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'], 
accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].processor, 139846069993136) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff'], 
239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- 
ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=10 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[10] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[10] == '10' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- 
TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__) # 
key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | 
| | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
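The TENSOR_MATCH guards above pin every property of the to_add_out weight and bias that the trace relied on: parameter class, dispatch keys, torch.bfloat16 dtype, device 0, requires_grad, and the exact size/stride ([3072, 3072] / [3072, 1] for the weight, [3072] / [1] for the bias). Changing any of these after compilation fails the guard and forces a retrace. A toy sketch of that behaviour, using a stand-in Linear rather than the Flux attention layer (exact recompile behaviour may vary by PyTorch version):

    import torch
    from torch import nn

    # Stand-in layer with the same shape as the guarded to_add_out projection.
    lin = nn.Linear(3072, 3072, dtype=torch.bfloat16)
    compiled = torch.compile(lin)
    x = torch.randn(2, 3072, dtype=torch.bfloat16)
    compiled(x)               # traces and installs TENSOR_MATCH guards on weight/bias

    lin.half()                # weight/bias are now float16: dtype no longer matches
    compiled(x.half())        # guard failure here typically triggers a recompile
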
| | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].processor, 139846069233408) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 
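The norm_added_q / norm_added_k guards fix the module type, eps == 1e-06, a single weight parameter of shape [128] in torch.bfloat16 on device 0, and empty hook dictionaries; the guard comments point at diffusers normalization.py:428-430 (hidden_states * torch.rsqrt(variance + self.eps), followed by an optional weight). A minimal sketch of a layer matching those comments, an RMSNorm-style normalization written here only for illustration, not the diffusers class itself:

    import torch
    from torch import nn

    class RMSNormSketch(nn.Module):
        # eps and weight correspond to the EQUALS_MATCH / TENSOR_MATCH guards above.
        def __init__(self, dim: int = 128, eps: float = 1e-6):
            super().__init__()
            self.eps = eps
            self.weight = nn.Parameter(torch.ones(dim))

        def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
            variance = hidden_states.pow(2).mean(-1, keepdim=True)
            hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
            if self.weight is not None:   # mirrors the `if self.weight is not None:` guard site
                hidden_states = hidden_states * self.weight
            return hidden_states

    norm = RMSNormSketch(128).to(torch.bfloat16)
    # per-head projection shape below is an assumption, not taken from the log
    q = torch.randn(1, 24, 512, 128, dtype=torch.bfloat16)
    print(norm(q).shape)
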
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | 
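For norm2 the tree guards the LayerNorm configuration rather than tensors: eps == 1e-06, normalized_shape == (3072,), and ID_MATCH on both the weight and bias entries against the same object id (7580768), which is consistent with both being None, i.e. a LayerNorm without affine parameters. A sketch of an equivalent module, assuming that reading of the ID_MATCH guards:

    import torch
    from torch import nn
    import torch.nn.functional as F

    # LayerNorm over the last dim of size 3072, eps=1e-6, no learnable affine.
    # _parameters still has two entries ('weight', 'bias'), both None, which is
    # consistent with the DICT_LENGTH == 2 and ID_MATCH guards above.
    norm2 = nn.LayerNorm(3072, eps=1e-6, elementwise_affine=False)
    assert norm2.weight is None and norm2.bias is None

    x = torch.randn(1, 512, 3072, dtype=torch.bfloat16)
    y = F.layer_norm(x, norm2.normalized_shape, norm2.weight, norm2.bias, norm2.eps)
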
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
| +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
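Interleaved with the parameter guards, every submodule also gets guards over its hook dictionaries (DictSubclassGuardManager for _forward_hooks / _forward_pre_hooks, and DICT_LENGTH checks that the backward hook dicts stay empty). In practice this means attaching a hook to any of these modules after compilation invalidates the cached graph. A toy reproduction, not the Flux model; exact recompile behaviour depends on the PyTorch version:

    import torch
    from torch import nn

    mod = nn.Linear(8, 8)
    compiled = torch.compile(mod)
    compiled(torch.randn(2, 8))                         # first trace: hook dicts are empty

    mod.register_forward_hook(lambda m, inp, out: out)  # _forward_hooks is no longer empty
    compiled(torch.randn(2, 8))                         # hook guards fail -> retrace on this call
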
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
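Read together, the ff guards spell out the whole feed-forward block of transformer_blocks['10']: net[0] is a tanh-approximated GELU whose proj Linear has weight [12288, 3072], net[1] is Dropout(p=0.0) with training and inplace pinned by ID_MATCH (consistent with both being False), and net[2] is a Linear with weight [3072, 12288] and bias [3072], all parameters in torch.bfloat16 on device 0. A rough stand-in with the same shapes, a sketch rather than the diffusers FeedForward class:

    import torch
    from torch import nn

    class GELUProj(nn.Module):
        # net[0]: a Linear projection followed by tanh-approximated GELU,
        # matching the guarded proj weight [12288, 3072] and approximate == 'tanh'.
        def __init__(self, dim: int = 3072, inner_dim: int = 12288):
            super().__init__()
            self.proj = nn.Linear(dim, inner_dim)
            self.approximate = "tanh"

        def forward(self, x):
            return nn.functional.gelu(self.proj(x), approximate=self.approximate)

    ff = nn.Sequential(
        GELUProj(3072, 12288),
        nn.Dropout(p=0.0),        # net[1]: guarded p == 0.0, eval mode
        nn.Linear(12288, 3072),   # net[2]: guarded weight [3072, 12288], bias [3072]
    ).to(torch.bfloat16)

    out = ff(torch.randn(1, 16, 3072, dtype=torch.bfloat16))
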
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # 
diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | 
| | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=11 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[11] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[11] == '11' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11'].__dict__) # 
encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'], 
accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], 
accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | 
| | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) 
# nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'], 
99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = 
attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].processor, 139846069235472) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net'], 96863792) # for module in 
self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # 
diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | 
| | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=12 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[12] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[12] == '12' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].__dict__) # emb 
= self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # 
return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 
in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__) # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor 
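The guard entries above pin the metadata of this block's attention projections: attn.heads == 24; to_q, to_k and to_v are Linear layers with [3072, 3072] bfloat16 weights and [3072] biases on CUDA device 0; and norm_q / norm_k carry a [128] weight with eps == 1e-06 (128 is 3072 / 24, the per-head dimension). TENSOR_MATCH only compares parameter metadata (class, dispatch keys, dtype, device, requires_grad, size, stride), not the values, so the compiled graph keeps being reused as long as that metadata is unchanged. Below is a minimal, hypothetical sketch (not the diffusers FluxAttention implementation; it assumes a CUDA device is available, mirroring device=0 in the log) that builds projections with the same metadata and prints exactly the fields those guards compare:

import torch
import torch.nn as nn

# Illustrative stand-in for the guarded projections. Shapes, dtype and device
# are taken from the TENSOR_MATCH entries in this log (hidden dim 3072,
# 24 heads, torch.bfloat16 on cuda:0); the module names are hypothetical.
hidden_dim, num_heads = 3072, 24
head_dim = hidden_dim // num_heads  # 128, matching the norm_q / norm_k weight size

proj = nn.ModuleDict({
    name: nn.Linear(hidden_dim, hidden_dim, bias=True,
                    device="cuda", dtype=torch.bfloat16)
    for name in ("to_q", "to_k", "to_v")
})

# TENSOR_MATCH checks metadata, not values: dtype, device, requires_grad,
# size and stride all have to match for the guard to pass on a later call.
for name, mod in proj.items():
    for pname, p in mod.named_parameters():
        print(name, pname, p.dtype, p.device, tuple(p.shape), p.stride(), p.requires_grad)

If any of that metadata changes between calls, for example casting the weights to a different dtype or swapping a Linear for a different module class, the corresponding TENSOR_MATCH / TYPE_MATCH guard would be expected to fail and torch.compile would fall back to recompiling.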
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # 
diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].processor, 139846069036896) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_pre_hooks # ff_output = 
self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # 
hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=13 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[13] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[13] == '13' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in 
__call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'], 
97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out'], 
accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # 
return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
| | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].processor, 139846069038960) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: 
not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._parameters # 
hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape, 
accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=14 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[14] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[14] == '14' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # 
diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].__dict__) # attn_output, context_attn_output = 
self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is 
not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) 
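The records above pin transformer block 14's attention inputs: norm_k carries eps == 1e-06 and a [128] bfloat16 weight, and to_q / to_k are bias-carrying 3072x3072 linears on CUDA device 0 (guard dumps like this come from running the compile with TORCH_LOGS="guards" or torch._logging.set_logs(guards=True)). A minimal sketch of what the EQUALS_MATCH / TENSOR_MATCH checks amount to, using a stand-in nn.Linear rather than the actual module; the real checks run inside Dynamo's compiled guard managers:

    import torch
    import torch.nn as nn

    def tensor_match(p, size, stride, dtype=torch.bfloat16, device_index=0):
        # Simplified stand-in for the check_tensor(...) guard: parameter class,
        # dtype, device, requires_grad, size and stride must all equal the
        # values recorded when the graph was compiled.
        return (isinstance(p, nn.Parameter)
                and p.dtype == dtype
                and p.device == torch.device("cuda", device_index)
                and p.requires_grad
                and tuple(p.shape) == tuple(size)
                and tuple(p.stride()) == tuple(stride))

    # Stand-in for transformer_blocks[14].attn.to_q (a bias-carrying 3072->3072 linear).
    to_q = nn.Linear(3072, 3072, bias=True).to("cuda", torch.bfloat16)
    assert tensor_match(to_q.weight, size=(3072, 3072), stride=(3072, 1))
    assert tensor_match(to_q.bias, size=(3072,), stride=(1,))

Any of these properties changing between calls (a dtype cast, moving the model to another device, flipping requires_grad) falsifies the corresponding guard and the next call re-traces.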
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) 
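Besides the parameter checks, every submodule here (norm_added_q, norm_added_k, the projections, and so on) also carries hook and instance-dict guards: DICT_CONTAINS asserts that 'forward' is not shadowed on the instance, and the DICT_LENGTH entries assert that the _backward_hooks / _backward_pre_hooks dicts are empty (the _forward_hooks dicts sit under DictSubclassGuardManager entries of their own). A small self-contained sketch of the state these guards describe, using a plain LayerNorm as a stand-in; mutating that state after compilation (registering a hook, assigning an instance-level forward) is expected to falsify the guard set and force a re-trace on the next call:

    import torch.nn as nn

    norm = nn.LayerNorm(128)                     # stand-in for norm_added_k
    assert len(norm._backward_hooks) == 0        # matches "not ..._backward_hooks"
    assert len(norm._backward_pre_hooks) == 0    # matches "not ..._backward_pre_hooks"
    assert "forward" not in norm.__dict__        # matches the DICT_CONTAINS guard

    # Registering a hook populates one of the guarded dicts, so a compiled
    # graph that assumed it was empty can no longer be reused as-is.
    handle = norm.register_forward_hook(lambda mod, inp, out: out)
    assert len(norm._forward_hooks) == 1
    handle.remove()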
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].processor, 139846068848576) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) 
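The processor guards a few records up are stricter than the rest: TYPE_MATCH pins the processor's class and ID_MATCH pins the exact instance that Attention.forward dispatches to via "return self.processor(...)". A hedged sketch of the consequence, assuming the processor is diffusers' FluxAttnProcessor2_0 (the quoted attention_processor.py lines match its __call__, but the class is not named in this log) and using a hypothetical handle `attn` for transformer_blocks[14].attn:

    from diffusers.models.attention_processor import FluxAttnProcessor2_0

    old_proc = attn.processor                      # `attn` is a hypothetical handle, see above
    attn.set_processor(FluxAttnProcessor2_0())     # same class, brand-new instance
    assert type(attn.processor) is type(old_proc)  # TYPE_MATCH would still pass
    assert attn.processor is not old_proc          # but ID_MATCH fails, so the next forward re-traces

So even an "equivalent" processor swap after torch.compile invalidates the cached graph; swap processors before compiling, or expect a recompile.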
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in 
forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=15 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[15] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[15] == '15' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- 
TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__) # 
key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
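A note on what the guards in this attention block are pinning: to_q / to_k / to_v are bfloat16 nn.Linear layers with [3072, 3072] weights and [3072] biases, attn.heads == 24 (so head_dim = 3072 // 24 = 128, matching the quoted head_dim = inner_dim // attn.heads), and norm_q / norm_k each carry a single [128] weight with eps == 1e-06. A minimal sketch of the layout these guards imply, inferred only from the guarded shapes and the source lines quoted in the guard comments (class and attribute names below are illustrative, not the diffusers implementation):

    import torch
    import torch.nn as nn

    class RMSNormSketch(nn.Module):
        # Matches the guarded norm_q / norm_k state: one [128] weight, eps == 1e-06.
        def __init__(self, dim: int = 128, eps: float = 1e-6):
            super().__init__()
            self.eps = eps
            self.weight = nn.Parameter(torch.ones(dim))

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            variance = x.float().pow(2).mean(-1, keepdim=True)
            x = x * torch.rsqrt(variance + self.eps)  # the op the guard comments cite
            return x.to(self.weight.dtype) * self.weight

    class AttnProjSketch(nn.Module):
        # Projection layout implied by the guards on transformer_blocks['15'].attn.
        def __init__(self, dim: int = 3072, heads: int = 24):
            super().__init__()
            self.heads = heads
            self.head_dim = dim // heads              # 3072 // 24 == 128
            self.to_q = nn.Linear(dim, dim)           # guarded: weight [3072, 3072], bias [3072]
            self.to_k = nn.Linear(dim, dim)
            self.to_v = nn.Linear(dim, dim)
            self.norm_q = RMSNormSketch(self.head_dim)
            self.norm_k = RMSNormSketch(self.head_dim)
            # The add_q_proj / add_k_proj / add_v_proj context projections guarded just
            # below carry the same [3072, 3072] / [3072] shapes.

        def forward(self, hidden_states: torch.Tensor):
            b, seq, _ = hidden_states.shape
            def split_heads(t: torch.Tensor) -> torch.Tensor:
                return t.view(b, seq, self.heads, self.head_dim).transpose(1, 2)
            query = split_heads(self.to_q(hidden_states))
            key = split_heads(self.to_k(hidden_states))
            value = split_heads(self.to_v(hidden_states))
            return self.norm_q(query), self.norm_k(key), value
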
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | 
| | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
| | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].processor, 139846068850640) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
| +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # 
diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | 
| | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=16 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[16] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[16] == '16' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16'].__dict__) # 
encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'], 
accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
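The guards recorded above for transformer_blocks['15'].ff_context pin down a concrete module shape: net['0'] is a Linear projection with a bfloat16 [12288, 3072] weight followed by GELU with approximate == 'tanh', net['1'] is Dropout with p == 0.0, and net['2'] is a Linear back to 3072 features. The following is a minimal sketch of an equivalent module, assuming only what those EQUALS_MATCH / TENSOR_MATCH entries state; the class names GELUProjSketch and ContextFeedForwardSketch are illustrative and are not diffusers classes.

import torch
import torch.nn as nn
import torch.nn.functional as F

class GELUProjSketch(nn.Module):
    # Mirrors net['0']: a proj Linear (guarded weight shape [12288, 3072])
    # followed by tanh-approximate GELU (guarded approximate == 'tanh').
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = "tanh"

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return F.gelu(self.proj(x), approximate=self.approximate)

class ContextFeedForwardSketch(nn.Module):
    # net = [GELU projection 3072 -> 12288, Dropout(p=0.0), Linear 12288 -> 3072],
    # matching the DICT_LENGTH == 3 and the per-entry guards above.
    def __init__(self, dim: int = 3072, mult: int = 4):
        super().__init__()
        self.net = nn.ModuleList([
            GELUProjSketch(dim, dim * mult),
            nn.Dropout(p=0.0),
            nn.Linear(dim * mult, dim),
        ])

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states

# Usage sketch (bfloat16 to match the guarded parameter dtype):
ff = ContextFeedForwardSketch().to(dtype=torch.bfloat16)
out = ff(torch.randn(1, 512, 3072, dtype=torch.bfloat16))

Any change to one of these guarded attributes at call time, such as a different approximate mode, a non-empty hook dict, or a weight with a different shape, dtype, or device, would fail the corresponding check and invalidate this compiled graph.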
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], 
accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | 
| | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) 
# nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'], 
99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = 
attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].processor, 139846068639824) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net'], 96863792) # for module in 
self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # 
diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | 
| | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=17 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[17] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[17] == '17' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].__dict__) # emb 
= self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # 
return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 
in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__) # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'], 97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # 
diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].processor, 139846067916960) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_pre_hooks # ff_output = 
self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # 
hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | 
| | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=18 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[18] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[18] == '18' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in 
__call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'], 97167728) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'], 97167728) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'], 
97167728) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out'], 
accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'], 97167728) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # 
return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'], 97167728) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
| | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].processor, 139846067919072) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: 
not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._parameters # 
hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape, 
accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 97167728) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'], 97167728) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks'], accessed_by=DictGetItemGuardAccessor(single_transformer_blocks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks'], 96863792) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=0 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[0] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'], 239601328) # attn_output = 
self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in 
forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].processor, 139846067704112) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=1 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[1] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
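Each entry in this dump is a guard that TorchDynamo re-evaluates on every call of the compiled Flux transformer forward before reusing the cached graph: EQUALS_MATCH pins Python scalars such as norm_q.eps == 1e-06 and attn.heads == 24, TENSOR_MATCH pins a parameter's class, dtype, device, requires_grad, size and stride (e.g. the bfloat16 [3072, 3072] to_q weight), DICT_LENGTH pins the empty hook dictionaries, DICT_CONTAINS checks that no instance-level 'forward' override has been added, and ID_MATCH pins specific objects such as the attention processor. If any single check fails, the whole frame is recompiled. The snippet below is not taken from this run; it is a minimal sketch on a hypothetical toy module (PyTorch 2.x assumed; Toy, toy, and the sizes are made up) showing how the same kind of TREE_GUARD_MANAGER dump is emitted and how changing one guarded attribute forces a recompile. For a real run, the equivalent output can be enabled with the TORCH_LOGS="guards,recompiles" environment variable.

import torch

# Programmatic equivalent of TORCH_LOGS="guards,recompiles".
torch._logging.set_logs(guards=True, recompiles=True)

class Toy(torch.nn.Module):
    """Hypothetical stand-in for one norm/attention block; not part of the model in this log."""
    def __init__(self, eps: float = 1e-6):
        super().__init__()
        self.eps = eps                     # Python float -> guarded with EQUALS_MATCH, like norm_k.eps above
        self.proj = torch.nn.Linear(8, 8)  # weight/bias -> guarded with TENSOR_MATCH (dtype, device, size, stride, requires_grad)

    def forward(self, x):
        y = self.proj(x)
        return y * torch.rsqrt(y.var(-1, keepdim=True) + self.eps)

toy = Toy()
compiled = torch.compile(toy, backend="eager")  # "eager" backend keeps the sketch free of Inductor/compiler deps
x = torch.randn(2, 8)

compiled(x)     # first call: Dynamo traces, installs guards, and prints a TREE_GUARD_MANAGER dump like the one here
compiled(x)     # all guards pass -> cached code is reused, nothing new is logged
toy.eps = 1e-5  # breaks the EQUALS_MATCH guard on `eps` ...
compiled(x)     # ... so the recompiles log reports the failed guard and the frame is compiled again

The same mechanism applies to the guards above: for example, swapping attn.processor for a different object would fail its ID_MATCH guard, and replacing a projection weight with one of a different dtype or shape would fail the corresponding TENSOR_MATCH guard, either of which triggers recompilation of this frame.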
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].processor, 139846067704880) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
+- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['1']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=2 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[2] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'], 239601328) # attn_output = 
self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in 
forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].processor, 139846067705648) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=3 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[3] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[3] == '3' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
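The norm_q / norm_k guards above all point at the same few lines of diffusers/src/diffusers/models/normalization.py (428 and 430): EQUALS_MATCH pins eps to 1e-06 and TENSOR_MATCH pins the learnable weight to a bfloat16, CUDA, size-[128] parameter (head_dim = 3072 / attn.heads = 3072 / 24). The sketch below is an illustrative approximation of that guarded RMSNorm forward, not the diffusers class verbatim; the class name RMSNormSketch and the fp32 upcast are assumptions made for the sketch, while the eps value and the weight's shape and dtype come straight from the guards.

import torch
from torch import nn

class RMSNormSketch(nn.Module):
    # Minimal sketch of the qk-norm these guards describe:
    # eps == 1e-06 and a learnable bfloat16 weight of size [128].
    def __init__(self, dim: int = 128, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim, dtype=torch.bfloat16))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        # variance over the head dimension, then the guarded expression
        # hidden_states * torch.rsqrt(variance + self.eps)  (normalization.py:428)
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states.to(torch.float32) * torch.rsqrt(variance + self.eps)
        if self.weight is not None:  # mirrors the guarded branch at normalization.py:430
            hidden_states = hidden_states.to(input_dtype) * self.weight
        return hidden_states

Because each of these properties is guarded separately, changing any one of them between runs (a different eps, casting the weights to float16, or a different head_dim) would fail the corresponding EQUALS_MATCH or TENSOR_MATCH check, so TorchDynamo would recompile the frame instead of reusing this compiled graph.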
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].processor, 139846067706416) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
+- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['3']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=4 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[4] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[4] == '4' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'], 239601328) # attn_output = 
self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in 
forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].processor, 139846067707184) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=5 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[5] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[5] == '5' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].processor, 139846067507312) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
+- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['5']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=6 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[6] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[6] == '6' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'], 239601328) # attn_output = 
self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in 
forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].processor, 139846067508080) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=7 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[7] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[7] == '7' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].processor, 139846067508848) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
+- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['7']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=8 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[8] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[8] == '8' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'], 239601328) # attn_output = 
self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in 
forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].processor, 139846067509616) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=9 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[9] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[9] == '9' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | 
| | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].processor, 139846067510384) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | 
+- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['9']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=10 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[10] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[10] == '10' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'], 239601328) # 
attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) 
# diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | 
| | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].processor, 139846067511152) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=11 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[11] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[11] == '11' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | 
| | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].processor, 139846066291424) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=12 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[12] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[12] == '12' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], 97167728) # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].processor, 139846066292288) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | +- KeyValueManager pair at index=13 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[13] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[13] == '13' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters) == 2 # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].processor, 139846066293056) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=14 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[14] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[14] == '14' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['14'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters) == 2 # 
input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # 
diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].approximate, 
accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].processor, 139846066293824) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=15 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[15] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[15] == '15' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].processor, 139846066294592) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=16 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[16] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[16] == '16' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16'], 247974224) # for 
index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].processor, 139846066090624) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=17 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[17] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[17] == '17' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'], 
accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'], 239601328) # 
attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) 
# diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | 
| | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].processor, 139846066091392) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=18 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[18] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[18] == '18' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | 
| | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].processor, 139846066092160) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=19 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[19] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[19] == '19' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'], 97167728) # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].processor, 139846066092928) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
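Taken together, the guards on single_transformer_blocks['19'] above pin down that block's structure exactly: an adaptive-norm branch (SiLU, then Linear 3072->9216, then LayerNorm(3072, eps=1e-6) whose weight and bias are guarded as None), proj_mlp Linear 3072->12288, GELU(approximate='tanh'), proj_out Linear 15360->3072, and an attention module with heads=24, 128-element RMS-style q/k norm weights, and 3072x3072 q/k/v projections, all bfloat16 on cuda:0. A minimal sketch of a module with the same parameter shapes, reconstructed only from these guards (not the diffusers FluxSingleTransformerBlock implementation), is:

# Sketch only: a module whose parameter shapes and hyperparameters match the
# guards logged above for single_transformer_blocks['19'] (dim=3072, 24 heads
# of 128, 4x MLP, bfloat16). Reconstructed from the guard log; this is NOT the
# diffusers FluxSingleTransformerBlock implementation.
import torch
import torch.nn as nn

class SingleBlockShapeSketch(nn.Module):
    def __init__(self, dim=3072, heads=24, mlp_ratio=4):
        super().__init__()
        head_dim = dim // heads  # 128, matches the norm_q / norm_k weight size
        # adaptive-norm branch: SiLU -> Linear(dim, 3*dim); the inner LayerNorm has no affine params
        self.silu = nn.SiLU()                                # guard only checks .inplace is a constant
        self.linear = nn.Linear(dim, 3 * dim)                # weight [9216, 3072], bias [9216]
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # weight/bias guarded as None
        # MLP branch
        self.proj_mlp = nn.Linear(dim, mlp_ratio * dim)      # weight [12288, 3072]
        self.act_mlp = nn.GELU(approximate="tanh")           # guard: approximate == 'tanh'
        self.proj_out = nn.Linear(dim + mlp_ratio * dim, dim)  # weight [3072, 15360]
        # attention projections and per-head q/k norms (heads == 24)
        self.to_q = nn.Linear(dim, dim)                      # weight [3072, 3072]
        self.to_k = nn.Linear(dim, dim)
        self.to_v = nn.Linear(dim, dim)
        # nn.RMSNorm needs a recent PyTorch; the log shows diffusers' own RMSNorm
        # with the same 128-element weight and eps=1e-6
        self.norm_q = nn.RMSNorm(head_dim, eps=1e-6)
        self.norm_k = nn.RMSNorm(head_dim, eps=1e-6)

Each TENSOR_MATCH above also fixes dtype (torch.bfloat16), device (cuda:0), size, and stride, so changing any of these at runtime would fail the guard tree and trigger a recompile.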
[__guards] | | | | | | +- KeyValueManager pair at index=20 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[20] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[20] == '20' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters) == 2 # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].processor, 139846066093696) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=21 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[21] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[21] == '21' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['21'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters) == 2 # 
input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # 
diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].approximate, 
accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].processor, 139846065365440) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=22 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[22] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[22] == '22' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].processor, 139846065366208) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=23 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[23] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[23] == '23' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23'], 247974224) # for 
index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].processor, 139846065366976) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=24 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[24] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[24] == '24' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'], 
accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'], 239601328) # 
attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) 
# diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | 
| | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].processor, 139846065367744) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=25 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[25] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[25] == '25' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | 
| | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].processor, 139846065368512) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=26 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[26] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[26] == '26' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'], 97167728) # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].processor, 139846065164544) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | +- KeyValueManager pair at index=27 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[27] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[27] == '27' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters) == 2 # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].processor, 139846065165312) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=28 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[28] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[28] == '28' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['28'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters) == 2 # 
input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # 
diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].approximate, 
accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].processor, 139846065166080) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=29 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[29] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[29] == '29' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].processor, 139846065166848) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=30 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[30] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[30] == '30' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30'], 247974224) # for 
index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
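(Editor's note, not part of the log.) The norm_q / norm_k guards in this region pin eps == 1e-06 and a [128]-element bfloat16 weight, matching the RMS-norm line quoted from diffusers/src/diffusers/models/normalization.py:428. A minimal sketch of that computation, assuming the usual float32 up-cast and simplifying the dtype handling (illustrative only, not the diffusers implementation verbatim):

import torch

def rms_norm(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # variance in float32, then the guarded line: hidden_states * torch.rsqrt(variance + self.eps)
    orig_dtype = x.dtype
    variance = x.float().pow(2).mean(-1, keepdim=True)
    x = x * torch.rsqrt(variance + eps)
    return (x * weight).to(orig_dtype)   # weight has shape [head_dim] == [128] here

# usage with shapes consistent with the guards above (batch, heads=24, seq, head_dim=128)
q = torch.randn(1, 24, 16, 128, dtype=torch.bfloat16)
w = torch.ones(128, dtype=torch.bfloat16)
out = rms_norm(q, w)
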
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].processor, 139846065167616) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
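(Editor's note, not part of the log.) The subtree above for single_transformer_blocks['30'] is what Dynamo re-checks on every call of the compiled forward: module types, submodule counts, the absence of per-instance forward overrides and hooks, scalar attributes like heads and approximate, and full check_tensor metadata for every parameter. A rough Python equivalent of a handful of those guards, written out by hand (illustrative; the real checks are compiled into C++ GuardManager nodes):

import torch

def guards_still_hold(self) -> bool:
    blk = self._modules['single_transformer_blocks']._modules['30']
    w = blk._modules['proj_mlp']._parameters['weight']
    return (
        'forward' not in blk.__dict__                       # DICT_CONTAINS: no per-instance forward override
        and len(blk._modules) == 5                          # DICT_LENGTH on the block's submodules
        and blk._modules['act_mlp'].approximate == 'tanh'   # EQUALS_MATCH
        and blk._modules['attn'].heads == 24                # EQUALS_MATCH
        and isinstance(w, torch.nn.Parameter)               # part of TENSOR_MATCH / check_tensor
        and w.dtype == torch.bfloat16
        and tuple(w.shape) == (12288, 3072)
        and tuple(w.stride()) == (3072, 1)
        and w.requires_grad
        and not blk._backward_hooks                         # DICT_LENGTH: not ..._backward_hooks
    )
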
[__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=31 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[31] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[31] == '31' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'], 
accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'], 239601328) # 
attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) 
# diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | 
| | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].processor, 139846064955456) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=32 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[32] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[32] == '32' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | 
| | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].processor, 139846064956224) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=33 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[33] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[33] == '33' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | 
| | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'], 97167728) # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].processor, 139846064956992) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
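Block '33' above and block '34' below are guarded with identical shapes, so the per-block layout these guards pin down can be summarized in a rough PyTorch sketch. This is a reconstruction from the guarded sizes only, not the diffusers source; the class and attribute names are illustrative:

    import torch.nn as nn

    class SingleBlockSketch(nn.Module):
        # Dimensions taken from the TENSOR_MATCH guards for one single_transformer_block.
        def __init__(self, dim=3072, heads=24, mlp_ratio=4):
            super().__init__()
            # "norm": SiLU -> Linear(3072 -> 9216) producing shift/scale/gate, plus a
            # LayerNorm(3072, eps=1e-6) whose weight and bias are guarded as None.
            self.norm_linear = nn.Linear(dim, 3 * dim)                  # weight [9216, 3072]
            self.norm_ln = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)
            self.proj_mlp = nn.Linear(dim, mlp_ratio * dim)             # weight [12288, 3072]
            self.act_mlp = nn.GELU(approximate="tanh")                  # approximate == 'tanh'
            self.proj_out = nn.Linear(dim + mlp_ratio * dim, dim)       # weight [3072, 15360]
            # Attention projections, all 3072 -> 3072, with q/k norms over head_dim = 128.
            self.to_q = nn.Linear(dim, dim)
            self.to_k = nn.Linear(dim, dim)
            self.to_v = nn.Linear(dim, dim)
            self.heads = heads                                          # EQUALS_MATCH: heads == 24
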
[__guards] | | | | | | +- KeyValueManager pair at index=34 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[34] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[34] == '34' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: 
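The `norm` guards for block '34' quote the normalization path directly (`emb = self.linear(self.silu(emb))` followed by `x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]`). A minimal sketch of that modulation under the guarded sizes (3072-wide hidden states, a 9216-wide projection presumably chunked into shift/scale/gate); the function and variable names are illustrative:

    import torch
    import torch.nn.functional as F

    def adaln_single_sketch(x, emb, linear_weight, linear_bias):
        # emb = linear(silu(emb)); the guarded linear weight is [9216, 3072].
        emb = F.linear(F.silu(emb), linear_weight, linear_bias)
        shift_msa, scale_msa, gate = emb.chunk(3, dim=1)    # 9216 = 3 * 3072
        # LayerNorm(3072) with no affine parameters, eps guarded as 1e-6,
        # followed by the modulation quoted in the guards above.
        x = F.layer_norm(x, (x.shape[-1],), eps=1e-6)
        x = x * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate
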
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters) == 2 # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].processor, 139846064957760) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=35 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[35] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[35] == '35' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['35'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters) == 2 # 
input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # 
diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].approximate, 
accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].processor, 139846064958528) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=36 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[36] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[36] == '36' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].processor, 139846064959296) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- KeyValueManager pair at index=37 V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[37] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[37] == '37' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37'] V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37'], 247974224) # for 
index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'], 97167728) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'], 97167728) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'], 97167728) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'], 97167728) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'], 97167728) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].processor, 139846064755328) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=L['self']._modules['norm_out'], accessed_by=DictGetItemGuardAccessor(norm_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out'], 99394624) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=L['self']._modules['norm_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['norm_out'].__dict__) # hidden_states = self.norm_out(hidden_states, temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:548 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['norm_out']._modules) == 3 # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['silu'], 96881248) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['norm_out']._modules['silu'].__dict__) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['norm_out']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['linear'], 97167728) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['norm_out']._modules['linear'].__dict__) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['norm_out']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['norm_out']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[6144, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['norm_out']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[6144], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] # diffusers/src/diffusers/models/normalization.py:306 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['norm_out']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] # diffusers/src/diffusers/models/normalization.py:306 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['norm_out']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['norm_out']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['norm_out']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['norm_out']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] 
[__guards] | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['norm_out']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['norm_out']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['norm_out']._parameters # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['norm_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['norm_out']._backward_hooks # hidden_states = self.norm_out(hidden_states, temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:548 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['norm_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['norm_out']._backward_pre_hooks # hidden_states = self.norm_out(hidden_states, temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:548 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=L['self']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['proj_out'], 97167728) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=L['self']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['proj_out'].__dict__) # output = self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:549 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=L['self']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[64, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[64], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=L['self'].training, accessed_by=GetAttrGuardAccessor(training) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(L['self'].training, 7629920) # if self.training and self.gradient_checkpointing: # diffusers/src/diffusers/models/transformers/transformer_flux.py:472 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=L['self']._parameters, accessed_by=GetAttrGuardAccessor(_parameters) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- DICT_LENGTH: not L['self']._parameters # _parameters = self.__dict__["_parameters"] # nn/modules/module.py:1904 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=L['self']._internal_dict, accessed_by=GetAttrGuardAccessor(_internal_dict) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- TYPE_MATCH: ___check_type_id(L['self']._internal_dict, 221665040) # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'norm_out') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'proj_out') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'pos_embed') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'x_embedder') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'time_text_embed') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'context_embedder') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'transformer_blocks') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'single_transformer_blocks') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | +- GuardManager: source=L['img_ids'], accessed_by=DictGetItemGuardAccessor(img_ids) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- TENSOR_MATCH: check_tensor(L['img_ids'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[4096, 3], stride=[3, 1]) # if img_ids.ndim == 3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:462 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_HASATTR: hasattr(L['img_ids'], '_dynamo_dynamic_indices') == False # if img_ids.ndim == 3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:462 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] 
[0/0] [__guards] | +- GuardManager: source=L['txt_ids'], accessed_by=DictGetItemGuardAccessor(txt_ids) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- TENSOR_MATCH: check_tensor(L['txt_ids'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[512, 3], stride=[3, 1]) # if txt_ids.ndim == 3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:456 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_HASATTR: hasattr(L['txt_ids'], '_dynamo_dynamic_indices') == False # if txt_ids.ndim == 3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:456 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | +- GuardManager: source=L['guidance'], accessed_by=DictGetItemGuardAccessor(guidance) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- TENSOR_MATCH: check_tensor(L['guidance'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.float32, device=0, requires_grad=False, size=[1], stride=[1]) # if guidance is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:445 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_HASATTR: hasattr(L['guidance'], '_dynamo_dynamic_indices') == False # if guidance is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:445 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | +- GuardManager: source=L['timestep'], accessed_by=DictGetItemGuardAccessor(timestep) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- TENSOR_MATCH: check_tensor(L['timestep'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[1], stride=[1]) # timestep = timestep.to(hidden_states.dtype) * 1000 # diffusers/src/diffusers/models/transformers/transformer_flux.py:444 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_HASATTR: hasattr(L['timestep'], '_dynamo_dynamic_indices') == False # timestep = timestep.to(hidden_states.dtype) * 1000 # diffusers/src/diffusers/models/transformers/transformer_flux.py:444 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | +- GuardManager: source=L['return_dict'], accessed_by=DictGetItemGuardAccessor(return_dict) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- ID_MATCH: ___check_obj_id(L['return_dict'], 7629920) # if not return_dict: # diffusers/src/diffusers/models/transformers/transformer_flux.py:555 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | +- GuardManager: source=L['hidden_states'], accessed_by=DictGetItemGuardAccessor(hidden_states) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- TENSOR_MATCH: check_tensor(L['hidden_states'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[1, 4096, 64], stride=[262144, 64, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_HASATTR: hasattr(L['hidden_states'], '_dynamo_dynamic_indices') == False # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | +- GuardManager: source=L['pooled_projections'], accessed_by=DictGetItemGuardAccessor(pooled_projections) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- TENSOR_MATCH: check_tensor(L['pooled_projections'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[1, 768], stride=[768, 1]) # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_HASATTR: hasattr(L['pooled_projections'], '_dynamo_dynamic_indices') == False # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | +- GuardManager: source=L['encoder_hidden_states'], accessed_by=DictGetItemGuardAccessor(encoder_hidden_states) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- TENSOR_MATCH: check_tensor(L['encoder_hidden_states'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[1, 512, 4096], stride=[2097152, 4096, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_HASATTR: hasattr(L['encoder_hidden_states'], '_dynamo_dynamic_indices') == False # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | +- GuardManager: source=L['joint_attention_kwargs'], accessed_by=DictGetItemGuardAccessor(joint_attention_kwargs) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- ID_MATCH: ___check_obj_id(L['joint_attention_kwargs'], 7580768) # if joint_attention_kwargs is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | +- GuardManager: source=L['controlnet_block_samples'], accessed_by=DictGetItemGuardAccessor(controlnet_block_samples) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- ID_MATCH: ___check_obj_id(L['controlnet_block_samples'], 7580768) # if controlnet_block_samples is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:502 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | +- GuardManager: 
source=L['controlnet_single_block_samples'], accessed_by=DictGetItemGuardAccessor(controlnet_single_block_samples) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- ID_MATCH: ___check_obj_id(L['controlnet_single_block_samples'], 7580768) # if controlnet_single_block_samples is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:538 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | +- GuardManager: source=G, accessed_by=GlobalsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['USE_PEFT_BACKEND'], accessed_by=DictGetItemGuardAccessor(USE_PEFT_BACKEND) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['USE_PEFT_BACKEND'], 7629952) # if USE_PEFT_BACKEND: # diffusers/src/diffusers/models/transformers/transformer_flux.py:434 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['scale_lora_layers'], accessed_by=DictGetItemGuardAccessor(scale_lora_layers) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['scale_lora_layers'].__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['scale_lora_layers'].__code__, 139856000130016) # scale_lora_layers(self, lora_scale) # diffusers/src/diffusers/models/transformers/transformer_flux.py:436 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['unscale_lora_layers'], accessed_by=DictGetItemGuardAccessor(unscale_lora_layers) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['unscale_lora_layers'].__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['unscale_lora_layers'].__code__, 139856000130192) # unscale_lora_layers(self, lora_scale) # diffusers/src/diffusers/models/transformers/transformer_flux.py:553 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__builtins_dict___0'], accessed_by=DictGetItemGuardAccessor(__builtins_dict___0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__builtins_dict___0']['int'], accessed_by=DictGetItemGuardAccessor(int) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___0']['int'], 7592000) # if isinstance(pos, int): # diffusers/src/diffusers/models/embeddings.py:547 in get_1d_rotary_pos_embed V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__builtins_dict___0']['len'], accessed_by=DictGetItemGuardAccessor(len) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___0']['len'], 139859477898240) # assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" # diffusers/src/diffusers/models/embeddings.py:54 in get_timestep_embedding V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__builtins_dict___0']['set'], 
accessed_by=DictGetItemGuardAccessor(set) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___0']['set'], 7574816) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__builtins_dict___0']['str'], accessed_by=DictGetItemGuardAccessor(str) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___0']['str'], 7556320) # return str(idx) # nn/modules/container.py:319 in _get_abs_string_index V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__builtins_dict___0']['iter'], accessed_by=DictGetItemGuardAccessor(iter) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___0']['iter'], 139859477898160) # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__builtins_dict___0']['range'], accessed_by=DictGetItemGuardAccessor(range) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___0']['range'], 7576320) # for i in range(n_axes): # diffusers/src/diffusers/models/embeddings.py:628 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__builtins_dict___0']['slice'], accessed_by=DictGetItemGuardAccessor(slice) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___0']['slice'], 7571168) # if isinstance(idx, slice): # nn/modules/container.py:331 in __getitem__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__builtins_dict___0']['super'], accessed_by=DictGetItemGuardAccessor(super) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___0']['super'], 7562400) # return super().__getattr__(name) # diffusers/src/diffusers/models/modeling_utils.py:151 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__builtins_dict___0']['hasattr'], accessed_by=DictGetItemGuardAccessor(hasattr) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___0']['hasattr'], 139859477897600) # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__builtins_dict___0']['enumerate'], accessed_by=DictGetItemGuardAccessor(enumerate) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___0']['enumerate'], 7452256) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__builtins_dict___0']['isinstance'], accessed_by=DictGetItemGuardAccessor(isinstance) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___0']['isinstance'], 139859477898000) # if isinstance(pos, int): # diffusers/src/diffusers/models/embeddings.py:547 in get_1d_rotary_pos_embed V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_attention) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention'], 139846585526608) # if len(args) > 0 or kwargs.get("scale", None) is not None: # diffusers/src/diffusers/models/attention.py:1162 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_embeddings) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'], 139846586042224) # t_emb = get_timestep_embedding( # diffusers/src/diffusers/models/embeddings.py:696 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].math, accessed_by=GetAttrGuardAccessor(math) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].math, 139859475307632) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].math.log, accessed_by=GetAttrGuardAccessor(log) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].math.log, 139859474302592) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch, accessed_by=GetAttrGuardAccessor(torch) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch, 139859475513072) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['torch'] # hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) # diffusers/src/diffusers/models/transformers/transformer_flux.py:96 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is 
G['__import_diffusers_dot_models_dot_normalization'].torch # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['__import_diffusers_dot_models_dot_attention_processor'].torch # query = torch.cat([encoder_hidden_states_query_proj, query], dim=2) # diffusers/src/diffusers/models/attention_processor.py:1755 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.cat, accessed_by=GetAttrGuardAccessor(cat) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.cat, 139859471507920) # emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # diffusers/src/diffusers/models/embeddings.py:69 in get_timestep_embedding V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.cos, accessed_by=GetAttrGuardAccessor(cos) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.cos, 139859471509120) # emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # diffusers/src/diffusers/models/embeddings.py:69 in get_timestep_embedding V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.exp, accessed_by=GetAttrGuardAccessor(exp) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.exp, 139859471510480) # emb = torch.exp(exponent) # diffusers/src/diffusers/models/embeddings.py:62 in get_timestep_embedding V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.sin, accessed_by=GetAttrGuardAccessor(sin) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.sin, 139856572546480) # emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # diffusers/src/diffusers/models/embeddings.py:69 in get_timestep_embedding V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.chunk, accessed_by=GetAttrGuardAccessor(chunk) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.chunk, 139859471508080) # scale, shift = torch.chunk(emb, 2, dim=1) # diffusers/src/diffusers/models/normalization.py:305 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.outer, accessed_by=GetAttrGuardAccessor(outer) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: 
___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.outer, 139856572571056) # freqs = torch.outer(t, freqs) # type: ignore # [S, D/2] # diffusers/src/diffusers/models/embeddings.py:552 in get_1d_rotary_pos_embed V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.rsqrt, accessed_by=GetAttrGuardAccessor(rsqrt) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.rsqrt, 139859471462208) # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.stack, accessed_by=GetAttrGuardAccessor(stack) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.stack, 139859471467760) # x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3) # diffusers/src/diffusers/models/embeddings.py:595 in apply_rotary_emb V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.arange, accessed_by=GetAttrGuardAccessor(arange) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.arange, 139859471382304) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.float16, accessed_by=GetAttrGuardAccessor(float16) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.float16 == torch.float16 # if self.weight.dtype in [torch.float16, torch.bfloat16]: # diffusers/src/diffusers/models/normalization.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.float32, accessed_by=GetAttrGuardAccessor(float32) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.float32 == torch.float32 # start=0, end=half_dim, dtype=torch.float32, device=timesteps.device # diffusers/src/diffusers/models/embeddings.py:58 in get_timestep_embedding V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.float64, accessed_by=GetAttrGuardAccessor(float64) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.float64 == torch.float64 # freqs_dtype = torch.float32 if is_mps else torch.float64 # diffusers/src/diffusers/models/embeddings.py:627 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: 
source=G['__import_diffusers_dot_models_dot_embeddings'].torch.bfloat16, accessed_by=GetAttrGuardAccessor(bfloat16) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.bfloat16 == torch.bfloat16 # if self.weight.dtype in [torch.float16, torch.bfloat16]: # diffusers/src/diffusers/models/normalization.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.from_numpy, accessed_by=GetAttrGuardAccessor(from_numpy) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.from_numpy, 139859471373712) # t = torch.from_numpy(pos).to(freqs.device) # type: ignore # [S] # diffusers/src/diffusers/models/embeddings.py:551 in get_1d_rotary_pos_embed V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb, accessed_by=GetAttrGuardAccessor(apply_rotary_emb) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__code__, 139855833426912) # from .embeddings import apply_rotary_emb # diffusers/src/diffusers/models/attention_processor.py:1760 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[0], 7629952) # if use_real: # diffusers/src/diffusers/models/embeddings.py:586 in apply_rotary_emb V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[1], accessed_by=GetItemGuardAccessor(1) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[1] == -1 # if use_real_unbind_dim == -1: # diffusers/src/diffusers/models/embeddings.py:592 in apply_rotary_emb V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding, accessed_by=GetAttrGuardAccessor(get_timestep_embedding) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__code__, 139855833348912) # t_emb = get_timestep_embedding( # diffusers/src/diffusers/models/embeddings.py:696 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__defaults__[3], accessed_by=GetItemGuardAccessor(3) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__defaults__[3] == 10000 # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed, accessed_by=GetAttrGuardAccessor(get_1d_rotary_pos_embed) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__code__, 139855833425856) # cos, sin = get_1d_rotary_pos_embed( # diffusers/src/diffusers/models/embeddings.py:629 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed, accessed_by=FuncDefaultsGuardAccessor V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[0] == 10000.0 # theta = theta * ntk_factor # diffusers/src/diffusers/models/embeddings.py:549 in get_1d_rotary_pos_embed V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[2], accessed_by=GetItemGuardAccessor(2) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[2] == 1.0 # freqs = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=freqs_dtype)[: (dim // 2)] / dim)) / linear_factor # [D/2] # diffusers/src/diffusers/models/embeddings.py:550 in get_1d_rotary_pos_embed V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[3], accessed_by=GetItemGuardAccessor(3) V0828 05:01:23.013332 1882310 
torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[3] == 1.0 # theta = theta * ntk_factor # diffusers/src/diffusers/models/embeddings.py:549 in get_1d_rotary_pos_embed V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'], 139856042419520) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F, 139856042421440) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_activations'].F # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_dropout'].F # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_activation'].F # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_normalization'].F # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_attention_processor'].F # hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) # diffusers/src/diffusers/models/attention_processor.py:1765 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.gelu, accessed_by=GetAttrGuardAccessor(gelu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.gelu, 139856049958000) # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.silu, 
accessed_by=GetAttrGuardAccessor(silu) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.silu, 139856039794000) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.linear, accessed_by=GetAttrGuardAccessor(linear) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.linear, 139856049959216) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.dropout, accessed_by=GetAttrGuardAccessor(dropout) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.dropout, 139856039777904) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.layer_norm, accessed_by=GetAttrGuardAccessor(layer_norm) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.layer_norm, 139856039795440) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.scaled_dot_product_attention, accessed_by=GetAttrGuardAccessor(scaled_dot_product_attention) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.scaled_dot_product_attention, 139856049962576) # hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) # diffusers/src/diffusers/models/attention_processor.py:1765 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_module) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_module'], 139856045629904) # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_hooks, accessed_by=GetAttrGuardAccessor(_global_forward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:01:23.013332 
1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_hooks, accessed_by=GetAttrGuardAccessor(_global_backward_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_pre_hooks, accessed_by=GetAttrGuardAccessor(_global_forward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_pre_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_pre_hooks, accessed_by=GetAttrGuardAccessor(_global_backward_pre_hooks) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_pre_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_activations'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_activations) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_activations'], 139855999915008) # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_activations'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_activations'].F # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_dropout'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_dropout) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_dropout'], 139856038787168) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_dropout'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: 
G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_dropout'].F # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_normalization'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_normalization) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_normalization'], 139846585727152) # variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) # diffusers/src/diffusers/models/normalization.py:427 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_normalization'].torch, accessed_by=GetAttrGuardAccessor(torch) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['__import_diffusers_dot_models_dot_normalization'].torch # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_container'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_container) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_container'], 139856039033488) # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_container'].operator, accessed_by=GetAttrGuardAccessor(operator) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_container'].operator, 139859476171088) # idx = operator.index(idx) # nn/modules/container.py:314 in _get_abs_string_index V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_container'].operator.index, accessed_by=GetAttrGuardAccessor(index) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_container'].operator.index, 139859476211968) # idx = operator.index(idx) # nn/modules/container.py:314 in _get_abs_string_index V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_modeling_utils'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_modeling_utils) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_modeling_utils'], 139855997135104) # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: 
source=G['__import_torch_dot_nn_dot_modules_dot_activation'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_activation) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_activation'], 139856042420880) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_activation'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_activation'].F # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_normalization'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_normalization) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_normalization'], 139856038867568) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_normalization'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_normalization'].F # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_attention_processor) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention_processor'], 139855999915568) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].inspect, accessed_by=GetAttrGuardAccessor(inspect) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention_processor'].inspect, 139859475305312) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].inspect.signature, accessed_by=GetAttrGuardAccessor(signature) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].inspect.signature.__code__, 
accessed_by=GetAttrGuardAccessor(__code__) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention_processor'].inspect.signature.__code__, 139859474846800) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_attention_processor'].F # hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) # diffusers/src/diffusers/models/attention_processor.py:1765 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].torch, accessed_by=GetAttrGuardAccessor(torch) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['__import_diffusers_dot_models_dot_attention_processor'].torch # query = torch.cat([encoder_hidden_states_query_proj, query], dim=2) # diffusers/src/diffusers/models/attention_processor.py:1755 in __call__ V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['__import_peft_dot_tuners_dot_tuners_utils'], accessed_by=DictGetItemGuardAccessor(__import_peft_dot_tuners_dot_tuners_utils) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- GuardManager: source=G['__import_peft_dot_tuners_dot_tuners_utils'].BaseTunerLayer, accessed_by=GetAttrGuardAccessor(BaseTunerLayer) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_peft_dot_tuners_dot_tuners_utils'].BaseTunerLayer, 244527504) # from peft.tuners.tuners_utils import BaseTunerLayer # diffusers/src/diffusers/utils/peft_utils.py:113 in scale_lora_layers V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | +- GuardManager: source=G['torch'], accessed_by=DictGetItemGuardAccessor(torch) V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['torch'] # hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) # diffusers/src/diffusers/models/transformers/transformer_flux.py:96 in forward V0828 05:01:23.013332 1882310 torch/_dynamo/guards.py:2263] [0/0] [__guards] V0828 05:01:35.223701 1882310 torch/_dynamo/guards.py:2796] [0/1] [__recompiles] Recompiling function forward in /fsx/sayak/diffusers/src/diffusers/models/transformers/transformer_flux.py:388 V0828 05:01:35.223701 1882310 torch/_dynamo/guards.py:2796] [0/1] [__recompiles] triggered by the following guard failure(s): V0828 05:01:35.223701 1882310 torch/_dynamo/guards.py:2796] [0/1] [__recompiles] - 0/0: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in 
forward W0828 05:02:32.900942 1882310 torch/_inductor/utils.py:1406] [0/1] DeviceCopy in input program W0828 05:02:32.961526 1882310 torch/_inductor/utils.py:1406] [0/1] DeviceCopy in input program W0828 05:02:32.994202 1882310 torch/_inductor/utils.py:1406] [0/1] DeviceCopy in input program V0828 05:03:29.818554 1882310 torch/_dynamo/guards.py:2297] [0/1] [__guards] GUARDS: V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] TREE_GUARD_MANAGER: V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] +- RootGuardManager V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- DEFAULT_DEVICE: utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:471 in init_ambient_guards V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GLOBAL_STATE: ___check_global_state() V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- TORCH_FUNCTION_MODE_STACK: ___check_torch_function_mode_stack() V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['self'], accessed_by=DictGetItemGuardAccessor(self) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- TYPE_MATCH: ___check_type_id(L['self'], 245047360) # scale_lora_layers(self, lora_scale) # diffusers/src/diffusers/models/transformers/transformer_flux.py:436 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=L['self']._buffers, accessed_by=GetAttrGuardAccessor(_buffers) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- DICT_LENGTH: not L['self']._buffers # _buffers = self.__dict__["_buffers"] # nn/modules/module.py:1908 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=L['self']._modules, accessed_by=GetAttrGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- DICT_LENGTH: len(L['self']._modules) == 8 # modules = self.__dict__["_modules"] # nn/modules/module.py:1912 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=L['self']._modules['pos_embed'], accessed_by=DictGetItemGuardAccessor(pos_embed) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['pos_embed'], 99356304) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=L['self']._modules['pos_embed'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['pos_embed'].__dict__) # image_rotary_emb = self.pos_embed(ids) # diffusers/src/diffusers/models/transformers/transformer_flux.py:469 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['pos_embed'].axes_dim, accessed_by=DictGetItemGuardAccessor(axes_dim) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['pos_embed'].axes_dim, 7569792) # 
self.axes_dim[i], pos[:, i], repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype # diffusers/src/diffusers/models/embeddings.py:630 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- LENGTH_CHECK: len(L['self']._modules['pos_embed'].axes_dim) == 3 # self.axes_dim[i], pos[:, i], repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype # diffusers/src/diffusers/models/embeddings.py:630 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['pos_embed'].axes_dim[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- EQUALS_MATCH: L['self']._modules['pos_embed'].axes_dim[0] == 16 # cos, sin = get_1d_rotary_pos_embed( # diffusers/src/diffusers/models/embeddings.py:629 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['pos_embed'].axes_dim[1], accessed_by=TupleGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- EQUALS_MATCH: L['self']._modules['pos_embed'].axes_dim[1] == 56 # cos, sin = get_1d_rotary_pos_embed( # diffusers/src/diffusers/models/embeddings.py:629 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['pos_embed'].axes_dim[2], accessed_by=TupleGetItemGuardAccessor(2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- EQUALS_MATCH: L['self']._modules['pos_embed'].axes_dim[2] == 56 # cos, sin = get_1d_rotary_pos_embed( # diffusers/src/diffusers/models/embeddings.py:629 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['pos_embed']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['pos_embed']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['pos_embed']._backward_hooks # image_rotary_emb = self.pos_embed(ids) # diffusers/src/diffusers/models/transformers/transformer_flux.py:469 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['pos_embed']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['pos_embed']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['pos_embed']._backward_pre_hooks # image_rotary_emb = self.pos_embed(ids) # diffusers/src/diffusers/models/transformers/transformer_flux.py:469 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=L['self']._modules['time_text_embed'], accessed_by=DictGetItemGuardAccessor(time_text_embed) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed'], 99372448) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=L['self']._modules['time_text_embed'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed'].__dict__) # else self.time_text_embed(timestep, guidance, pooled_projections) # diffusers/src/diffusers/models/transformers/transformer_flux.py:452 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules) == 4 # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'], accessed_by=DictGetItemGuardAccessor(time_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['time_proj'], 99358192) # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['time_proj'].__dict__) # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['time_proj'].__dict__) # guidance_proj = self.time_proj(guidance) # diffusers/src/diffusers/models/embeddings.py:994 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].scale, accessed_by=DictGetItemGuardAccessor(scale) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['time_text_embed']._modules['time_proj'].scale == 1 # emb = scale * emb # diffusers/src/diffusers/models/embeddings.py:66 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].num_channels, accessed_by=DictGetItemGuardAccessor(num_channels) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['time_text_embed']._modules['time_proj'].num_channels == 256 # half_dim = 
embedding_dim // 2 # diffusers/src/diffusers/models/embeddings.py:56 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['time_proj']._backward_hooks # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].flip_sin_to_cos, accessed_by=DictGetItemGuardAccessor(flip_sin_to_cos) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['time_proj'].flip_sin_to_cos, 7629952) # if flip_sin_to_cos: # diffusers/src/diffusers/models/embeddings.py:72 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['time_proj']._backward_pre_hooks # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].downscale_freq_shift, accessed_by=DictGetItemGuardAccessor(downscale_freq_shift) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['time_text_embed']._modules['time_proj'].downscale_freq_shift == 0 # exponent = exponent / (half_dim - downscale_freq_shift) # diffusers/src/diffusers/models/embeddings.py:60 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'], accessed_by=DictGetItemGuardAccessor(timestep_embedder) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder'], 99357248) # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder'].__dict__) # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules) == 3 # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'], accessed_by=DictGetItemGuardAccessor(linear_1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'], 97167728) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'].__dict__) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 256], stride=[256, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING: check_no_aliasing(L['img_ids'], L['txt_ids'], L['guidance'], L['timestep'], L['hidden_states'], L['pooled_projections'], L['encoder_hidden_states'], L['self']._modules['proj_out']._parameters['bias'], L['self']._modules['proj_out']._parameters['weight'], L['self']._modules['x_embedder']._parameters['bias'], L['self']._modules['x_embedder']._parameters['weight'], L['self']._modules['context_embedder']._parameters['bias'], L['self']._modules['context_embedder']._parameters['weight'], L['self']._modules['norm_out']._modules['linear']._parameters['bias'], L['self']._modules['norm_out']._modules['linear']._parameters['weight'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['bias'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['bias'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['weight'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['weight'], L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['bias'], L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['bias'], L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['bias'], L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['bias'], L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['weight'], L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['weight'], L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['weight'], L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight']) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'], accessed_by=DictGetItemGuardAccessor(act) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'], 96881248) # if self.act is not None: # diffusers/src/diffusers/models/embeddings.py:677 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- OBJECT_ALIASING: L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'] is L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['act'] # if self.act is not None: # diffusers/src/diffusers/models/embeddings.py:677 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].__dict__) # sample = self.act(sample) # diffusers/src/diffusers/models/embeddings.py:678 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].__dict__) # sample = self.act(sample) # diffusers/src/diffusers/models/embeddings.py:678 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'], accessed_by=DictGetItemGuardAccessor(linear_2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'], 97167728) # sample = self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'].__dict__) # sample = self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].post_act, accessed_by=DictGetItemGuardAccessor(post_act) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['timestep_embedder'].post_act, 7580768) # if self.post_act is not None: # diffusers/src/diffusers/models/embeddings.py:682 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['timestep_embedder']._parameters # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_hooks # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: 
source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_pre_hooks # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward.__defaults__[0], 7580768) # if condition is not None: # diffusers/src/diffusers/models/embeddings.py:673 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'], accessed_by=DictGetItemGuardAccessor(guidance_embedder) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['guidance_embedder'], 99357248) # guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['guidance_embedder'].__dict__) # guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules) == 3 # sample = self.linear_1(sample) # 
diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'], accessed_by=DictGetItemGuardAccessor(linear_1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'], 97167728) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'].__dict__) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 256], stride=[256, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
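The check_tensor(...) entries in the records above are TENSOR_MATCH guards: the compiled graph is only reused if each parameter still has the dtype, device, requires_grad flag, size and stride recorded at compile time. Below is a minimal sketch of that condition, not Dynamo's actual check_tensor (which also pins the Python type and DispatchKeySet); the `transformer` variable in the usage comment is an assumption for the model being compiled here.

```python
import torch

# Rough stand-in for the TENSOR_MATCH guards logged above (not Dynamo's
# check_tensor, which additionally pins the Python type and DispatchKeySet).
def tensor_matches(p, *, dtype, device, size, stride, requires_grad=False):
    return (
        p.dtype == dtype
        and p.device == torch.device(device)
        and p.requires_grad == requires_grad
        and tuple(p.shape) == tuple(size)
        and tuple(p.stride()) == tuple(stride)
    )

# Example with the shapes recorded above for guidance_embedder.linear_1
# (assumes `transformer` is the FluxTransformer2DModel being compiled):
# w = transformer.time_text_embed.guidance_embedder.linear_1.weight
# tensor_matches(w, dtype=torch.bfloat16, device="cuda:0",
#                size=(3072, 256), stride=(256, 1))
```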
[0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['act'], accessed_by=DictGetItemGuardAccessor(act) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- OBJECT_ALIASING: L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'] is L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['act'] # if self.act is not None: # diffusers/src/diffusers/models/embeddings.py:677 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'], accessed_by=DictGetItemGuardAccessor(linear_2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'], 97167728) # sample = self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'].__dict__) # sample = self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].post_act, accessed_by=DictGetItemGuardAccessor(post_act) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['guidance_embedder'].post_act, 7580768) # if self.post_act is not None: # diffusers/src/diffusers/models/embeddings.py:682 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['guidance_embedder']._parameters # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_hooks # guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_pre_hooks # guidance_emb = 
self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward.__defaults__[0], 7580768) # if condition is not None: # diffusers/src/diffusers/models/embeddings.py:673 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder'], accessed_by=DictGetItemGuardAccessor(text_embedder) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder'], 100455248) # pooled_projections = self.text_embedder(pooled_projection) # diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder'].__dict__) # pooled_projections = self.text_embedder(pooled_projection) # diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['text_embedder']._modules) == 3 # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'], accessed_by=DictGetItemGuardAccessor(linear_1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'], 97167728) # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'].__dict__) # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 768], stride=[768, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'], accessed_by=DictGetItemGuardAccessor(act_1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'], 96881248) # hidden_states = self.act_1(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1443 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].__dict__) # hidden_states = self.act_1(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1443 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'], accessed_by=DictGetItemGuardAccessor(linear_2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'], 97167728) # hidden_states = self.linear_2(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1444 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'].__dict__) # hidden_states = self.linear_2(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1444 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
+- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['text_embedder']._parameters # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['text_embedder']._backward_hooks # pooled_projections = self.text_embedder(pooled_projection) # diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['text_embedder']._backward_pre_hooks # pooled_projections = self.text_embedder(pooled_projection) # 
diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._parameters # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._backward_hooks # else self.time_text_embed(timestep, guidance, pooled_projections) # diffusers/src/diffusers/models/transformers/transformer_flux.py:452 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._backward_pre_hooks # else self.time_text_embed(timestep, guidance, pooled_projections) # diffusers/src/diffusers/models/transformers/transformer_flux.py:452 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=L['self']._modules['context_embedder'], accessed_by=DictGetItemGuardAccessor(context_embedder) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['context_embedder'], 97167728) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=L['self']._modules['context_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['context_embedder'].__dict__) # encoder_hidden_states = self.context_embedder(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:454 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['context_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['context_embedder']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
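Taken together, the TYPE_MATCH / DICT_LENGTH / TENSOR_MATCH guards above pin down the layer layout of time_text_embed's children (judging by the embeddings.py lines quoted, diffusers' TimestepEmbedding and PixArtAlphaTextProjection), while the DICT_CONTAINS('forward', ...) guards only assert that no instance-level forward override has been patched onto a module. The throwaway sketch below reproduces the same layer order and shapes purely to make the recorded sizes easier to read; the names and the `mlp` helper are illustrative, not diffusers API.

```python
import torch.nn as nn

# Illustrative only: layers with the order and shapes that the guards above
# record for time_text_embed's children; the real classes live in diffusers.
def mlp(in_dim, dim=3072):
    return nn.Sequential(nn.Linear(in_dim, dim), nn.SiLU(), nn.Linear(dim, dim))

timestep_embedder = mlp(256)   # linear_1: [3072, 256],  linear_2: [3072, 3072]
guidance_embedder = mlp(256)   # same shapes; its SiLU is OBJECT_ALIASING'd to
                               # timestep_embedder's act in the guards above
text_embedder     = mlp(768)   # linear_1: [3072, 768],  linear_2: [3072, 3072]
# Every weight/bias in this trace is torch.bfloat16 on cuda:0, requires_grad=False.
```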
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['context_embedder']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['context_embedder']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 4096], stride=[4096, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['context_embedder']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['context_embedder']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=L['self']._modules['x_embedder'], accessed_by=DictGetItemGuardAccessor(x_embedder) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['x_embedder'], 97167728) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=L['self']._modules['x_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['x_embedder'].__dict__) # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['x_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['x_embedder']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['x_embedder']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['x_embedder']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 64], stride=[64, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['x_embedder']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['x_embedder']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=L['self']._modules['transformer_blocks'], accessed_by=DictGetItemGuardAccessor(transformer_blocks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks'], 96863792) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=L['self']._modules['transformer_blocks'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 
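The guard comments in the stretch above keep pointing at the same few lines of peft/tuners/lora/layer.py (497-509): the LoRA-wrapped norm1_context.linear first runs its frozen base Linear (bfloat16 weight of size [18432, 3072], requires_grad=False), then adds lora_B(lora_A(dropout(x))) times the per-adapter scaling for the single active adapter 'default_0', with the DoRA branch guarded to stay disabled and both low-rank layers bias-free. A minimal sketch of that forward path, assuming the guarded shapes and a plain nn.Linear stand-in rather than the real PEFT class:

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Simplified stand-in for the guarded PEFT lora.Linear: one adapter
        # ("default_0"), use_dora False, nothing merged into the base weight.
        def __init__(self, base: nn.Linear, r: int = 16, scaling: float = 1.0, p: float = 0.0):
            super().__init__()
            self.base_layer = base                                      # frozen, e.g. bf16 [18432, 3072]
            self.lora_A = nn.Linear(base.in_features, r, bias=False)    # trainable, weight [16, 3072]
            self.lora_B = nn.Linear(r, base.out_features, bias=False)   # trainable, weight [18432, 16]
            self.lora_dropout = nn.Dropout(p)
            self.scaling = scaling

        def forward(self, x):
            result = self.base_layer(x)                                 # layer.py:497
            x = x.to(self.lora_A.weight.dtype)                          # layer.py:506
            # layer.py:508-509 -- the non-DoRA branch these guards specialize on
            return result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling

The TENSOR_MATCH guards record exactly the trainable/frozen split visible here (requires_grad=True only on the lora_A / lora_B weights), so changing which parameters are trainable, or their dtype, device, or shape, would already invalidate this compiled graph.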
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].eps, 
accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape[0], 
accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
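For norm1_context itself, the guards quote diffusers/src/diffusers/models/normalization.py:137-139: the SiLU-plus-linear path produces the modulation embedding, and a LayerNorm with eps=1e-06 over normalized_shape (3072,) is scaled and shifted by it; the ID_MATCH checks on that LayerNorm's weight and bias are consistent with elementwise_affine=False (no affine parameters present). A hedged sketch of that adaptive-norm step, assuming the 6*3072-wide linear output is chunked into shift/scale/gate tensors for the attention and MLP branches, as the returned names (c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp) suggest:

    import torch
    import torch.nn as nn

    class AdaNormContextSketch(nn.Module):
        # Rough stand-in for the guarded norm1_context path:
        # SiLU -> (LoRA-wrapped) Linear 3072 -> 18432 -> six-way chunk -> modulated LayerNorm.
        def __init__(self, dim=3072, linear=None):
            super().__init__()
            self.silu = nn.SiLU()
            self.linear = linear if linear is not None else nn.Linear(dim, 6 * dim)
            self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # guarded weight/bias are absent

        def forward(self, x, emb):
            emb = self.linear(self.silu(emb))                                  # normalization.py:137
            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
            x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]   # normalization.py:139
            return x, gate_msa, shift_mlp, scale_mlp, gate_mlp

Because eps, normalized_shape, and the absence of affine parameters are all guarded as constants, this branch of the graph only stays valid for a 3072-wide stream with exactly this normalization configuration.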
[__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 
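The attn guards that start here fix attn.heads at 24 and quote head_dim = inner_dim // attn.heads from attention_processor.py:1721; with the 3072-wide to_q projection guarded further down, that gives head_dim = 3072 // 24 = 128, which is exactly the size of the norm_q (and norm_k) weight being checked. The quoted normalization.py:428/430 lines describe an RMS-style normalization over that 128-wide head dimension; a small illustrative helper (hypothetical, not the diffusers class itself):

    import torch

    def rms_norm_per_head(x, weight, eps=1e-6):
        # x: [..., heads, head_dim]; mirrors the two guarded source lines:
        #   hidden_states = hidden_states * torch.rsqrt(variance + self.eps)       # normalization.py:428
        #   if self.weight is not None: scale by the learned per-channel weight    # normalization.py:430
        variance = x.float().pow(2).mean(dim=-1, keepdim=True)
        x = x * torch.rsqrt(variance + eps)
        return x * weight if weight is not None else x

    heads, inner_dim = 24, 3072
    head_dim = inner_dim // heads                      # attention_processor.py:1721 -> 128
    q = torch.randn(2, 16, heads, head_dim)
    q = rms_norm_per_head(q, torch.ones(head_dim))     # guarded weight is bf16, size [128]

The EQUALS_MATCH on eps == 1e-06 and the TENSOR_MATCH on the [128] weight are why this guard tree is specific to the 24-head, 128-dim-per-head configuration.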
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) 
# diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) 
# diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_pre_hooks # value = 
attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
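Each TENSOR_MATCH above expands to a check_tensor call that re-validates only static metadata: the Python type (Parameter), the DispatchKeySet, dtype, device, requires_grad, size and stride. ID_MATCH pins object identity, and TYPE_MATCH / DICT_LENGTH / EQUALS_MATCH freeze the surrounding module and dict structure. A rough, purely illustrative Python equivalent of the metadata part of such a check (tensor_metadata_matches is a hypothetical helper, not Dynamo's actual C++ guard implementation):

import torch

def tensor_metadata_matches(t, *, dtype, device, requires_grad, size, stride):
    # Roughly what a TENSOR_MATCH / check_tensor guard re-checks on every call:
    # static metadata only (the real guard also checks the Python type and
    # DispatchKeySet, which this sketch skips). Tensor values are never compared.
    return (
        t.dtype == dtype
        and t.device == torch.device(device)
        and t.requires_grad == requires_grad
        and tuple(t.size()) == tuple(size)
        and tuple(t.stride()) == tuple(stride)
    )

# Mirrors the add_k_proj base_layer weight guard above; the real guard pins
# device=0 (cuda:0), "cpu" is used here only so the snippet runs anywhere.
w = torch.empty(3072, 3072, dtype=torch.bfloat16)
print(tensor_metadata_matches(w, dtype=torch.bfloat16, device="cpu",
                              requires_grad=False, size=(3072, 3072), stride=(3072, 1)))

These checks are compiled into the guard tree dumped here and evaluated on every call of the compiled forward, which is why a deep module tree with per-adapter ModuleDicts produces this many entries.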
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'], 
244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 
97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # 
peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B'], 
accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].processor, 239395136) # attn_parameters = 
set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].processor, 139846062622704) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # 
hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, 
*args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + 
lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for 
active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # 
hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at 
index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 
accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # 
return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # 
peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if 
isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # 
peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].merged_adapters, 
accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].__dict__) # x = 
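The merged_adapters, _disable_adapters and _active_adapter guards above are what commit the compiled graph to the single-adapter, un-merged branch: merged_adapters is an empty list, the boolean flags are pinned by ID_MATCH to a constant object id (for these flags almost certainly False), and active_adapters resolves to ['default_0']. A rough sketch of the branch state those guards freeze, paraphrasing the lines quoted from peft/tuners/tuners_utils.py:455-469 and peft/tuners/lora/layer.py:488; the class is illustrative, not PEFT's BaseTunerLayer:

    class AdapterStateSketch:
        """State the Dynamo guards above pin for norm1_context.linear."""

        def __init__(self):
            self._disable_adapters = False        # guarded via ___check_obj_id
            self._active_adapter = ["default_0"]  # EQUALS_MATCH on element 0
            self.merged_adapters = []             # LENGTH_CHECK: empty, i.e. never merged

        @property
        def merged(self):
            # return bool(self.merged_adapters)   (tuners_utils.py:455)
            return bool(self.merged_adapters)

        @property
        def active_adapters(self):
            # if isinstance(self.active_adapter, str): ...   (tuners_utils.py:469)
            if isinstance(self._active_adapter, str):
                return [self._active_adapter]
            return self._active_adapter

    layer = AdapterStateSketch()
    assert not layer.merged and not layer._disable_adapters
    assert layer.active_adapters == ["default_0"]

Changing the adapter set at runtime (merging, disabling, or adding a second adapter) would fail these guards and trigger recompilation of this frame.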
self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'], 
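norm1_context's own LayerNorm is guarded down to eps == 1e-06, normalized_shape == (3072,), and weight/bias pinned by ID_MATCH to a constant object id, i.e. no affine parameters. Together with the quoted lines emb = self.linear(self.silu(emb)) (normalization.py:137) and x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] (normalization.py:139), this is the adaptive-LayerNorm modulation, and the 18432-wide LoRA linear above matches a six-way 6 * 3072 split. A sketch under that six-chunk assumption, returning the same five extra values unpacked at transformer_flux.py:167 (not diffusers' exact AdaLayerNormZero code):

    import torch
    import torch.nn.functional as F

    def ada_norm_sketch(x, emb, linear, dim=3072, eps=1e-6):
        # emb = self.linear(self.silu(emb))   (normalization.py:137)
        emb = linear(F.silu(emb))
        # assumed six-way split: 6 * 3072 == 18432
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]   (normalization.py:139)
        x = F.layer_norm(x, (dim,), weight=None, bias=None, eps=eps)  # no affine params, eps guarded at 1e-6
        x = x * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp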
accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, 
size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
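For the attention module itself, the guards fix attn.heads == 24 and, through the quoted head_dim = inner_dim // attn.heads (attention_processor.py:1721), a per-head width of 3072 // 24 = 128, which is exactly the shape of the norm_q and norm_k weights above ([128], bfloat16, requires_grad=False, eps == 1e-06). The quoted hidden_states * torch.rsqrt(variance + self.eps) (normalization.py:428) is an RMS-style normalization of the query/key heads; an illustrative sketch, not diffusers' exact RMSNorm:

    import torch

    def rms_norm_sketch(hidden_states, weight, eps=1e-6):
        # variance over the last (head_dim == 128) axis, accumulated in fp32
        input_dtype = hidden_states.dtype
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        # hidden_states = hidden_states * torch.rsqrt(variance + self.eps)   (normalization.py:428)
        hidden_states = hidden_states * torch.rsqrt(variance + eps)
        # if self.weight is not None:   (normalization.py:430); weight guarded as a [128] bfloat16 Parameter
        if weight is not None:
            hidden_states = hidden_states.to(weight.dtype) * weight
        return hidden_states.to(input_dtype)

    query = torch.randn(2, 24, 4096, 128, dtype=torch.bfloat16)
    query = rms_norm_sketch(query, torch.ones(128, dtype=torch.bfloat16))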
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], 244529984) # key = 
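At this point the to_q adapter stack is fully specialized: base layer weight [3072, 3072] and bias [3072], both frozen bfloat16 on cuda:0; lora_A weight [16, 3072] and lora_B weight [3072, 16], both requires_grad=True; scaling['default_0'] == 1.0; no merged adapters and adapters enabled. The to_k guards that follow mirror it line for line. A quick cross-check of those values against the live module; transformer here is a hypothetical handle to the LoRA-patched FluxTransformer2DModel behind L['self']:

    import torch

    # hypothetical: `transformer` is the FluxTransformer2DModel with the
    # 'default_0' PEFT adapter attached, i.e. the object guarded as L['self'] above
    to_q = transformer.transformer_blocks[1].attn.to_q

    assert tuple(to_q.base_layer.weight.shape) == (3072, 3072)         # frozen base projection
    assert tuple(to_q.lora_A["default_0"].weight.shape) == (16, 3072)  # rank-16 down-projection
    assert tuple(to_q.lora_B["default_0"].weight.shape) == (3072, 16)  # rank-16 up-projection
    assert to_q.scaling["default_0"] == 1.0                            # lora_alpha / r
    assert to_q.base_layer.weight.dtype == torch.bfloat16
    assert not to_q.merged_adapters and not to_q.disable_adapters

Changing any of these (a different adapter name or rank, fp16 weights, merging the adapter into the base weight) fails the corresponding guard and forces a fresh compile for this frame.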
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_pre_hooks # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in 
__getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = 
attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # 
nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = 
attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].processor, 139846062624576) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net'], 96863792) # for module in self.net: # 
diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward 
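[Editor's annotation] To make this stretch of the guard tree easier to read: every guard in the records above is installed against Python state that Dynamo read while tracing the PEFT LoRA forward path quoted in the trailing comments ("result = result + lora_B(lora_A(dropout(x))) * scaling", peft/tuners/lora/layer.py:509; "scaling = self.scaling[active_adapter]", line 505; "if not self.use_dora[active_adapter]:", line 508). The sketch below is a minimal, hypothetical stand-in, not the real peft.tuners.lora.layer.Linear; the class name ToyLoraLinear, its default sizes, and the driver variables model / x are all illustrative. Assuming a recent PyTorch 2.x where torch._logging.set_logs accepts the guards/recompiles artifacts, compiling it should emit the same kinds of guards seen here (TENSOR_MATCH on the bf16 weights, EQUALS_MATCH on scaling['default_0'] == 1.0 and on the adapter name 'default_0', ID_MATCH on the use_dora booleans), and mutating one of those guarded constants should then show up in the recompiles log.

    import torch
    import torch.nn as nn

    class ToyLoraLinear(nn.Module):
        """Illustrative stand-in for a PEFT LoRA Linear layer; all names here are hypothetical."""
        def __init__(self, in_features=64, out_features=64, r=16):
            super().__init__()
            self.base_layer = nn.Linear(in_features, out_features, bias=True)
            self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({"default_0": nn.Dropout(p=0.0)})
            # Plain Python attributes: Dynamo reads these as constants while tracing and
            # guards on them (EQUALS_MATCH on 1.0 / 'default_0', ID_MATCH on the booleans).
            self.scaling = {"default_0": 1.0}
            self.use_dora = {"default_0": False}
            self._active_adapter = ["default_0"]

        def forward(self, x):
            result = self.base_layer(x)
            for active_adapter in self._active_adapter:
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]
                dropout = self.lora_dropout[active_adapter]
                scaling = self.scaling[active_adapter]
                if not self.use_dora[active_adapter]:
                    # Mirrors the source line quoted in the guard comments above.
                    result = result + lora_B(lora_A(dropout(x))) * scaling
            return result

    # Enable the same logging artifacts that produced the dump in this transcript.
    torch._logging.set_logs(guards=True, recompiles=True)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = ToyLoraLinear().to(device=device, dtype=torch.bfloat16)
    compiled = torch.compile(model)

    x = torch.randn(2, 64, device=device, dtype=torch.bfloat16)
    compiled(x)                        # first call: traces, compiles, and logs a guard tree
    model.scaling["default_0"] = 0.5   # mutate a guarded Python constant ...
    compiled(x)                        # ... the EQUALS_MATCH-style guard should fail and a recompile be logged

Equivalently, the same channels can be enabled from the shell with TORCH_LOGS="guards,recompiles" before rerunning the original pipeline; that environment variable is the documented counterpart of the set_logs call above.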
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result 
= result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 
accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 
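
The guards above pin every attribute the PEFT LoRA Linear wrapped into norm1.linear touches on its hot path: the 'default_0' keys of lora_A/lora_B, the [16, 3072] and [18432, 16] adapter weights (bfloat16, cuda:0, requires_grad=True), their ID_MATCH-guarded biases (most likely None), the dropout module, and the empty lora_embedding_A/B containers. The source comments attached to each guard (peft/tuners/lora/layer.py:488-509) trace the forward path sketched below; this is a condensed reading of those quoted lines only (single active adapter, no DoRA or merge handling), not the full PEFT implementation:

    def lora_linear_forward(layer, x):
        # Condensed from the source lines quoted in the guards (peft/tuners/lora/layer.py:488-509).
        if layer.disable_adapters:                         # layer.py:488
            return layer.base_layer(x)
        result = layer.base_layer(x)                       # layer.py:497  frozen base projection
        for active_adapter in layer.active_adapters:       # layer.py:499  ['default_0'] in this trace
            if active_adapter not in layer.lora_A.keys():  # layer.py:500
                continue
            lora_A = layer.lora_A[active_adapter]          # Linear(3072 -> 16), bias guarded as None-like
            lora_B = layer.lora_B[active_adapter]          # layer.py:503  Linear(16 -> 18432)
            dropout = layer.lora_dropout[active_adapter]   # layer.py:504
            scaling = layer.scaling[active_adapter]        # layer.py:505  1.0 for 'default_0'
            x = x.to(lora_A.weight.dtype)                  # layer.py:506  cast to bfloat16
            if not layer.use_dora[active_adapter]:         # layer.py:508  DoRA disabled here
                result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
        return result

Every dict length, key string, boolean flag and tensor property read on that path becomes its own guard, which is why a single adapted Linear contributes this many entries to the tree.
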
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
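
norm1 and norm1_context follow the same pattern: a SiLU plus a (LoRA-wrapped) Linear that maps the 3072-dim conditioning embedding to 6 * 3072 = 18432 modulation channels, followed by a LayerNorm whose weight and bias are guarded as None-like, i.e. elementwise_affine=False with eps=1e-6 over a (3072,) normalized_shape. A minimal shape-level sketch assembled from the quoted lines (diffusers normalization.py:135-139, transformer_flux.py:165/167); the chunk-into-6 step is inferred from the 18432-wide projection and the returned gates rather than quoted directly in the guards:

    import torch.nn as nn

    class AdaLayerNormZeroSketch(nn.Module):
        # Shape-level sketch of norm1 / norm1_context as guarded above; dims from the guard tree.
        def __init__(self, dim=3072):
            super().__init__()
            self.silu = nn.SiLU()
            self.linear = nn.Linear(dim, 6 * dim)   # 3072 -> 18432 (LoRA-wrapped in this trace)
            self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # weight/bias are None

        def forward(self, x, emb):
            emb = self.linear(self.silu(emb))       # normalization.py:137
            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
            x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]   # normalization.py:139
            return x, gate_msa, shift_mlp, scale_mlp, gate_mlp                 # transformer_flux.py:165

The DICT_LENGTH == 2 guard on the norm's _parameters together with the two ID_MATCH checks is consistent with a LayerNorm that registered weight and bias as None, which is what elementwise_affine=False produces.
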
[0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
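
The DICT_CONTAINS guards on each module's __dict__ assert that no per-instance forward override has been installed, so the compiled graph can keep dispatching to the class-level forward. A small, hypothetical check of exactly that condition (the helper name is illustrative, not part of this log):

    def has_instance_forward_override(module):
        # Exactly the condition the DICT_CONTAINS guards above test, inverted: True means
        # forward was monkey-patched on this instance, which would invalidate the guard
        # and force a recompile on the next call.
        return "forward" in module.__dict__

Running it over the original module tree after any post-compile surgery is a cheap way to spot which instance-level patch broke these guards.
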
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
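
The TENSOR_MATCH guards fix the adapter geometry exactly: rank r = 16 (lora_A weight [16, 3072]), out_features 18432 (lora_B weight [18432, 16]), both bfloat16 on device 0 with requires_grad=True, while the base projection stays frozen. A quick check of what those sizes imply, using only the shapes and the scaling value visible in these guards (lora_alpha itself is not in the log, so the value below is an inference from PEFT's default scaling = lora_alpha / r rule):

    in_features, out_features, r = 3072, 18432, 16     # from the TENSOR_MATCH shapes above
    lora_params = r * in_features + out_features * r   # A: [16, 3072] plus B: [18432, 16]
    base_params = out_features * in_features           # frozen base weight [18432, 3072]
    print(lora_params, base_params, round(lora_params / base_params, 4))
    # 344064 56623104 0.0061  -> roughly 0.6% extra parameters per adapted Linear

    scaling = 1.0                                # EQUALS_MATCH above
    lora_alpha = scaling * r                     # 16, assuming the default scaling = alpha / r
    delta_W_shape = (out_features, in_features)  # B @ A has the same shape as the base weight
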
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 
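
Because every guard is plain Python over the module tree, an unexpected recompile after touching the adapters can be diagnosed by re-reading the same attributes by hand. The helper below is illustrative only: the transformer handle, the block index and the selection of attributes are assumptions taken from the sources quoted above, not something emitted by this log:

    def lora_guard_snapshot(transformer, block_idx=2):
        # Re-evaluate a few of the guarded expressions against the live module; any value that
        # drifts from what was guarded (adapter keys, scaling, merge state) forces a recompile.
        linear = transformer.transformer_blocks[block_idx].norm1_context.linear
        return {
            "adapter_keys": list(linear.lora_A.keys()),       # guarded key: 'default_0'
            "active_adapter": list(linear._active_adapter),   # EQUALS_MATCH 'default_0'
            "scaling": dict(linear.scaling),                  # EQUALS_MATCH 1.0
            "use_dora": dict(linear.use_dora),                # ID_MATCH (False-like)
            "merged_adapters": list(linear.merged_adapters),  # LENGTH_CHECK: empty
            "disable_adapters": linear._disable_adapters,     # ID_MATCH (False-like)
        }
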
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters) == 1 # if 
self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_pre_hooks # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in 
__getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'], 
244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] 
# peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].processor, 139846063044304) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # 
nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward 
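The guards in this stretch all trace back to the PEFT LoRA Linear forward that the inline comments cite (peft/tuners/lora/layer.py:488-509): every Python attribute that forward reads on the traced path (disable_adapters, merged_adapters, the active adapter list, scaling, use_dora, and the lora_A/lora_B weights) gets its own guard, so the compiled graph is reused only while those values are unchanged. Below is a minimal sketch of that forward using the shapes guarded for transformer_blocks['2'].ff.net[0].proj (base 3072 -> 12288, rank 16, adapter 'default_0', bfloat16); it is an illustration of the guarded code path, not the actual PEFT implementation, and the singleton ids in the ID_MATCH guards are presumed to be False/None.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Illustrative stand-in for a PEFT lora.Linear; attribute names mirror the guarded ones.
    def __init__(self, in_features=3072, out_features=12288, r=16, adapter="default_0"):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
        self.base_layer.requires_grad_(False)  # base weights show requires_grad=False in TENSOR_MATCH
        # ModuleDicts keyed by adapter name: the guards check the key list, the dict lengths,
        # and each LoRA weight's dtype/device/size/stride/requires_grad (TENSOR_MATCH).
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Dropout(p=0.0)})
        self.scaling = {adapter: 1.0}        # EQUALS_MATCH (1.0 is what the net['2'] guard shows; assumed here)
        self.use_dora = {adapter: False}     # ID_MATCH, presumably against the False singleton
        self.merged_adapters = []            # LENGTH_CHECK: not merged_adapters
        self._disable_adapters = False       # ID_MATCH on the value behind self.disable_adapters
        self._active_adapter = [adapter]     # EQUALS_MATCH on element 0 == 'default_0'

    def forward(self, x):
        if self._disable_adapters:           # every attribute read on this path becomes a guard,
            return self.base_layer(x)        # which is why the tree above is so deep
        result = self.base_layer(x)
        for active_adapter in self._active_adapter:
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:
                # the line cited repeatedly in the guards above
                result = result + lora_B(lora_A(dropout(x))) * scaling
        return result

With this structure, changing something as small as the active adapter name, the scaling value, or the use_dora flag would flip one of the EQUALS_MATCH/ID_MATCH guards above and force a recompilation of the frame.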
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = 
self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for 
module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 
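The guards above pin the PEFT LoRA wrapper around transformer_blocks.2.ff_context.net.2: a frozen bfloat16 base nn.Linear (12288 -> 3072), trainable lora_A [16, 12288] and lora_B [3072, 16] weights, a single active adapter 'default_0', no DoRA, nothing merged, adapters enabled. The forward they specialize on is the one quoted from peft/tuners/lora/layer.py (result = self.base_layer(x, ...); result = result + lora_B(lora_A(dropout(x))) * scaling). A minimal single-adapter sketch of that path, with illustrative names and shapes taken from the TENSOR_MATCH guards above (not PEFT's actual LoraLayer implementation):

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Simplified single-adapter stand-in for the guarded PEFT LoRA Linear (illustrative only)."""

    def __init__(self, in_features=12288, out_features=3072, r=16,
                 scaling=1.0, dropout_p=0.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)   # frozen base weights in the real model
        self.lora_A = nn.Linear(in_features, r, bias=False)      # guarded weight size [16, 12288]
        self.lora_B = nn.Linear(r, out_features, bias=False)     # guarded weight size [3072, 16]
        self.lora_dropout = nn.Dropout(dropout_p)                 # the 'default_0' dropout module
        self.scaling = scaling                                    # scaling = self.scaling[active_adapter]
        self.disable_adapters = False                             # guarded via ID_MATCH on _disable_adapters
        self.merged = False                                       # merged_adapters is guarded empty

    def forward(self, x):
        result = self.base_layer(x)                               # peft/tuners/lora/layer.py:497
        if self.disable_adapters or self.merged:
            return result
        x = x.to(self.lora_A.weight.dtype)                        # peft/tuners/lora/layer.py:506
        return result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling  # layer.py:509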
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=3 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[3] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[3] == '3' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 
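Every record in this dump carries the [__guards] artifact tag, which TorchDynamo emits when guard logging is enabled. A self-contained way to produce the same kind of GUARDS / TREE_GUARD_MANAGER output on a toy module, assuming a recent PyTorch 2.x (no CUDA or Triton needed):

import torch

# Same effect as running the process with TORCH_LOGS="guards".
torch._logging.set_logs(guards=True)

class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(8, 8)

    def forward(self, x):
        return self.linear(x)

# backend="eager" still builds and logs the guard tree, without needing a compiler toolchain.
compiled = torch.compile(Toy(), backend="eager")
compiled(torch.randn(2, 8))   # first call compiles the frame and prints its guard tree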
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # 
nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 
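The TENSOR_MATCH entries above pin dtype (torch.bfloat16), device (cuda:0), size, stride and requires_grad of every parameter the traced forward touches, and NO_TENSOR_ALIASING asserts the guarded tensors are distinct objects; the [0/1] prefix on these records marks a recompilation of the frame first compiled as [0/0]. A toy illustration of how changing one guarded tensor property forces such a recompile (illustrative only; the eager backend keeps the demo lightweight):

import torch

torch._dynamo.reset()                        # clean compile cache for the demo
lin = torch.nn.Linear(4, 4)
compiled = torch.compile(lin, backend="eager")
compiled(torch.randn(2, 4))                  # first compile of the frame
lin.weight.requires_grad_(False)             # invalidates the TENSOR_MATCH guard on the weight
compiled(torch.randn(2, 4))                  # guard check fails, so the frame recompiles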
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
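The guards above pin norm1's SiLU plus LoRA-wrapped Linear (base weight [18432, 3072], i.e. 6 x 3072 modulation outputs); the records that follow pin the inner affine-free LayerNorm (eps=1e-06, normalized_shape (3072,), no weight or bias). Together they correspond to the AdaLayerNormZero-style path quoted from diffusers/models/normalization.py:137-139. A rough illustrative reconstruction of that path (not the diffusers source; the LoRA wrapping around the linear is omitted):

import torch
import torch.nn as nn

class AdaLayerNormZeroSketch(nn.Module):
    def __init__(self, dim=3072):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(dim, 6 * dim)                              # guarded base weight [18432, 3072]
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # weight/bias are None in the guards

    def forward(self, x, emb):
        emb = self.linear(self.silu(emb))                                  # normalization.py:137
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]   # normalization.py:139
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp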
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # 
diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if 
self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
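The TENSOR_MATCH entries above record exactly which tensor metadata the compiled graph was specialized on. As an illustration only (Dynamo's real check_tensor is a compiled guard that also covers dispatch keys and tensor aliasing), the checks logged for to_v's lora_A 'default_0' weight amount to roughly the following, assuming device=0 here means cuda:0:

import torch

def illustrate_tensor_match(p: torch.Tensor) -> bool:
    # Rough Python equivalent of the TENSOR_MATCH guard logged for
    # to_v.lora_A['default_0'].weight; not Dynamo's actual implementation.
    return (
        isinstance(p, torch.nn.Parameter)
        and p.dtype == torch.bfloat16
        and p.device == torch.device("cuda", 0)   # "device=0" in the guard
        and p.requires_grad is True
        and tuple(p.shape) == (16, 3072)
        and tuple(p.stride()) == (3072, 1)
    )
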
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
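Every source comment in these guards points at the same two code paths: the Flux attention processor calling attn.to_k / attn.to_v / attn.add_k_proj / attn.add_v_proj, and the peft LoRA Linear wrapper around each of those projections. As a reading aid, here is a condensed paraphrase of that LoRA forward pass, assembled only from the source lines the guards cite (peft/tuners/lora/layer.py); the real peft code differs across versions and also handles merged adapters and DoRA, which the guards above pin to the empty / non-DoRA state:

def lora_linear_forward(self, x, *args, **kwargs):
    # Paraphrase of peft/tuners/lora/layer.py around lines 488-509, per the
    # guard comments above; not the verbatim peft implementation.
    if self.disable_adapters:                         # layer.py:488 (traced with this False)
        return self.base_layer(x, *args, **kwargs)
    result = self.base_layer(x, *args, **kwargs)      # layer.py:497
    for active_adapter in self.active_adapters:       # layer.py:499, guarded to ['default_0']
        if active_adapter not in self.lora_A.keys():  # layer.py:500
            continue
        lora_A = self.lora_A[active_adapter]
        lora_B = self.lora_B[active_adapter]          # layer.py:503
        dropout = self.lora_dropout[active_adapter]   # layer.py:504
        scaling = self.scaling[active_adapter]        # layer.py:505, guarded == 1.0
        x = x.to(lora_A.weight.dtype)                 # layer.py:506
        if not self.use_dora[active_adapter]:         # layer.py:508 (non-DoRA branch traced)
            result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
    return result
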
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'], 244529984) # 
encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
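The weight shapes guarded for each projection make the adapter geometry explicit: the frozen base projection is 3072x3072 in bf16, lora_A is [16, 3072] and lora_B is [3072, 16] with bias pinned by ID_MATCH, i.e. a rank-16 update, and scaling['default_0'] is pinned to 1.0 (with peft's usual scaling = lora_alpha / r, without rslora, that would correspond to lora_alpha = 16, though alpha itself does not appear in the log). Numerically the guarded branch computes:

import torch
import torch.nn.functional as F

def lora_projection(x, base_w, base_b, lora_a_w, lora_b_w, scaling=1.0):
    # x: [..., 3072]; base_w: [3072, 3072]; lora_a_w: [16, 3072]; lora_b_w: [3072, 16]
    # Same arithmetic as result = base_layer(x) + lora_B(lora_A(x)) * scaling,
    # with the lora_dropout module omitted here for brevity.
    return F.linear(x, base_w, base_b) + scaling * F.linear(F.linear(x, lora_a_w), lora_b_w)
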
___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
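The ID_MATCH guards compare object identity rather than value, which is how Dynamo pins singletons. The concrete ids (7629920, 7580768) are specific to this process, but given the branches that were traced (`if self.disable_adapters:` falling through, `if not self.use_dora[...]` taken, and `F.linear(input, self.weight, self.bias)` on bias-free lora_A / lora_B layers) they are consistent with False and None respectively. Roughly:

def illustrate_id_match(obj, expected_id):
    # ___check_obj_id in the guards above behaves like an identity check;
    # the expected ids are process-specific and not portable across runs.
    return id(obj) == expected_id
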
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = 
attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
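The TENSOR_MATCH entries above pin down the full metadata of to_out[0].base_layer's weight and bias: parameter type, dispatch keys, dtype (torch.bfloat16), device 0, requires_grad, sizes ([3072, 3072] and [3072]) and strides, plus NO_TENSOR_ALIASING. A rough Python stand-in for what one such guard re-checks on every call of the compiled frame is sketched below; this is only an illustration (the real check runs inside Dynamo's C++ guard machinery), and tensor_match / expected_weight are hypothetical names, with the values copied from the guard on the base_layer weight above.

import torch

def tensor_match(t, expected):
    # Illustrative stand-in for a TENSOR_MATCH guard: the compiled graph is reused
    # only if the parameter still carries the metadata recorded at compile time.
    return (
        isinstance(t, torch.nn.Parameter)
        and t.dtype == expected["dtype"]
        and t.device == torch.device(expected["device"])
        and t.requires_grad == expected["requires_grad"]
        and tuple(t.shape) == expected["size"]
        and t.stride() == expected["stride"]
    )

# Values taken from the guard on ...['to_out']._modules['0']._modules['base_layer'] weight.
expected_weight = {
    "dtype": torch.bfloat16,
    "device": "cuda:0",
    "requires_grad": False,
    "size": (3072, 3072),
    "stride": (3072, 1),
}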
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
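Nearly every guard in this stretch points back at the unmerged LoRA path in peft/tuners/lora/layer.py (the quoted lines 488-509): disable_adapters, active_adapters, the lora_A / lora_B / lora_dropout ModuleDicts, scaling and use_dora are all read inside the layer's forward, so Dynamo pins each of them down. A minimal, self-contained reconstruction of that path, pieced together from the source lines quoted in the guard comments, is sketched below. TinyLoraLinear is a hypothetical name, the shapes and values mirror the guards above (in/out features 3072, rank 16, scaling 1.0, a single adapter 'default_0', use_dora False), and the real PEFT layer has additional branches that this sketch omits.

import torch
import torch.nn as nn

class TinyLoraLinear(nn.Module):
    # Hypothetical reconstruction of the unmerged LoRA Linear path that the guard
    # comments quote (peft/tuners/lora/layer.py:488-509); not the PEFT class itself.
    def __init__(self, features=3072, rank=16, adapter="default_0"):
        super().__init__()
        self.base_layer = nn.Linear(features, features)                       # layer.py:497
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(features, rank, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(rank, features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})           # lora_dropout guards
        self.scaling = {adapter: 1.0}           # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}        # ID_MATCH on use_dora['default_0']
        self.active_adapters = [adapter]        # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self.disable_adapters = False           # ID_MATCH on _disable_adapters

    def forward(self, x):
        if self.disable_adapters:                              # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                            # layer.py:497
        for active_adapter in self.active_adapters:            # layer.py:499
            if active_adapter not in self.lora_A.keys():       # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]               # layer.py:503
            dropout = self.lora_dropout[active_adapter]        # layer.py:504
            scaling = self.scaling[active_adapter]             # layer.py:505
            x = x.to(lora_A.weight.dtype)                      # layer.py:506
            if not self.use_dora[active_adapter]:              # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Every attribute read in this forward shows up as its own guard, which is why a single LoRA-wrapped Linear contributes dozens of entries to the tree above.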
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].eps, 
accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].processor, 139846063046176) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # 
return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # 
peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if 
isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
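
The entries above for ff_context.net.2 all hang off the peft LoRA Linear forward quoted in the guards' inline comments (peft/tuners/lora/layer.py:488-509): the disable/merged short-circuits, the active-adapter loop, and the per-adapter lora_A / lora_B / dropout / scaling lookups. The following is only a minimal sketch of that code path, named after the attributes the guards read; it is not the peft implementation itself:

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Minimal stand-in mirroring the attribute layout the guards walk; not the real peft class.
        def __init__(self, base: nn.Linear, r: int = 16, adapter: str = "default_0"):
            super().__init__()
            self.base_layer = base
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base.out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
            self.scaling = {adapter: 1.0}        # EQUALS_MATCH: scaling['default_0'] == 1.0
            self.use_dora = {adapter: False}     # ID_MATCH on False (layer.py:508)
            self._active_adapter = [adapter]     # EQUALS_MATCH: _active_adapter[0] == 'default_0'
            self._disable_adapters = False       # ID_MATCH on False (layer.py:488)
            self.merged_adapters = []            # LENGTH_CHECK: empty (tuners_utils.py:455)

        def forward(self, x):
            if self._disable_adapters or self.merged_adapters:
                return self.base_layer(x)                       # adapters inactive or already merged
            result = self.base_layer(x)                         # layer.py:497
            for active_adapter in self._active_adapter:         # layer.py:499
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]            # layer.py:503
                dropout = self.lora_dropout[active_adapter]     # layer.py:504
                scaling = self.scaling[active_adapter]          # layer.py:505
                x = x.to(lora_A.weight.dtype)                   # layer.py:506
                result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
            return result

    layer = LoraLinearSketch(nn.Linear(3072, 3072))
    out = layer(torch.randn(2, 3072))

Every attribute touched in this path, from the dict lengths and the 'default_0' key down to each weight's dtype, shape and stride, gets its own guard, which is why a single LoRA-wrapped Linear contributes a dozen or more entries to the tree.
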
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=4 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[4] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[4] == '4' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 
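
The TENSOR_MATCH guards above pin the adapter weights themselves (torch.bfloat16 on device 0, lora_A [16, 3072], lora_B [18432, 16], plus stride and requires_grad), so swapping or re-quantizing an adapter invalidates the compiled graph. Where inference-only use allows it, one common mitigation is to fold the adapter into the base Linear before compiling (for example peft's merge_and_unload or diffusers' fuse_lora, where available), so the lora_A / lora_B submodules and their guards disappear. The arithmetic being folded is just the low-rank update from layer.py:509; a sketch under that assumption (eval-mode dropout, no bias on lora_A / lora_B, as these guards show):

    import torch

    @torch.no_grad()
    def merged_linear_weight(base_w, lora_A_w, lora_B_w, scaling=1.0):
        # base_w: [out, in], lora_A_w: [r, in], lora_B_w: [out, r]
        # F.linear(x, merged, base_bias) == base(x) + lora_B(lora_A(x)) * scaling
        # when dropout is the identity (eval) and the adapter layers carry no bias.
        return base_w + scaling * (lora_B_w @ lora_A_w)
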
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 
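
The norm1 subtree that ends here follows diffusers' AdaLayerNormZero as quoted in the guards: SiLU, then a (LoRA-wrapped) Linear with weight [18432, 3072] = [6*3072, 3072] producing six modulation chunks, then a LayerNorm with eps == 1e-06, normalized_shape == (3072,) and no affine weight/bias (both ID_MATCH against None). A rough, self-contained sketch of that modulation pattern, based only on the normalization.py:135-139 lines cited above; the chunk ordering is taken from diffusers and should be treated as illustrative:

    import torch
    import torch.nn as nn

    class AdaLayerNormZeroSketch(nn.Module):
        def __init__(self, dim: int = 3072):
            super().__init__()
            self.silu = nn.SiLU()
            self.linear = nn.Linear(dim, 6 * dim)   # 18432 x 3072, matching the guarded base_layer weight
            self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # weight/bias None, as guarded

        def forward(self, x, emb):
            emb = self.linear(self.silu(emb))                                   # normalization.py:137
            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
            x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]    # normalization.py:139
            return x, gate_msa, shift_mlp, scale_mlp, gate_mlp

    block_norm = AdaLayerNormZeroSketch()
    x, *mods = block_norm(torch.randn(1, 4, 3072), torch.randn(1, 3072))
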
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # 
peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].merged_adapters, 
accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].__dict__) # x = 
self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'], 
accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, 
size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], 244529984) # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_pre_hooks # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in 
__getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = 
attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # 
nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = 
attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].processor, 139846066996704) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net'], 96863792) # for module in self.net: # 
diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result 
= result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 
accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
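
The guards walked here for ff_context.net.0.proj pin down every attribute the PEFT LoRA forward reads, as quoted in the guard comments (peft/tuners/lora/layer.py:497-509): the active adapter name, the use_dora flag, the dropout module, the scaling value, and the adapter weights (lora_A.weight [16, 3072], lora_B.weight [12288, 16], torch.bfloat16, requires_grad=True). A minimal sketch of that code path follows; it is not PEFT itself, and the base layer's Linear(3072, 12288) config, bias=False on the adapter projections, the no-op lora_dropout, and scaling=1.0 are assumptions consistent with the sibling guards elsewhere in this log.

import torch
import torch.nn as nn

# Sketch (not PEFT itself) of the path the guards specialize:
#   result = self.base_layer(x, *args, **kwargs)             # peft/tuners/lora/layer.py:497
#   result = result + lora_B(lora_A(dropout(x))) * scaling   # peft/tuners/lora/layer.py:509
class LoraLinearSketch(nn.Module):
    def __init__(self, in_features=3072, out_features=12288, r=16, scaling=1.0):
        super().__init__()
        dtype = torch.bfloat16
        # base_layer config is assumed; its own guards appear earlier in the log
        self.base_layer = nn.Linear(in_features, out_features, dtype=dtype)
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False, dtype=dtype)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False, dtype=dtype)})
        # lora_dropout assumed to be a no-op (PEFT uses Identity/Dropout(0) when lora_dropout=0)
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Dropout(p=0.0)})
        self.scaling = {"default_0": scaling}          # guard: scaling = self.scaling[active_adapter]
        self.use_dora = {"default_0": False}           # guard: if not self.use_dora[active_adapter]
        self._active_adapter = ["default_0"]           # guard: _active_adapter[0] == 'default_0'

    def forward(self, x):
        result = self.base_layer(x)
        for adapter in self._active_adapter:
            lora_A = self.lora_A[adapter]
            lora_B = self.lora_B[adapter]
            dropout = self.lora_dropout[adapter]
            scaling = self.scaling[adapter]
            x_cast = x.to(lora_A.weight.dtype)         # x = x.to(lora_A.weight.dtype), layer.py:506
            result = result + lora_B(lora_A(dropout(x_cast))) * scaling
        return result

x = torch.randn(2, 3072, dtype=torch.bfloat16)
print(LoraLinearSketch()(x).shape)  # torch.Size([2, 12288])

Every value this forward reads shows up as a guard because the compiled graph specializes on it; if any of them changes (a different adapter name, another rank, a merged adapter), the guard set fails and Dynamo recompiles.
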
| | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
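
The net['1'] guards just above fix the Dropout hyperparameters that F.dropout(input, self.p, self.training, self.inplace) (nn/modules/dropout.py:70) depends on: p == 0.0, plus ID_MATCH checks on inplace and training that are consistent with both being False (eval mode). Under those assumed values the layer is a no-op, which a quick check confirms:

import torch
import torch.nn.functional as F

x = torch.randn(4, 12288, dtype=torch.bfloat16)
# p=0.0 and training=False are the values implied by the guards; either one
# alone already makes F.dropout return the input unchanged.
y = F.dropout(x, p=0.0, training=False, inplace=False)
print(torch.equal(x, y))  # True
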
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
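
The TENSOR_MATCH entries for the net['2'] base_layer spell out what check_tensor verifies per parameter: Python class (Parameter), dispatch key set, dtype (torch.bfloat16), device (CUDA device 0), requires_grad (False for the frozen base weights, True for the LoRA weights elsewhere in this log), size, and stride. A small illustration of those attributes with the logged shapes; the cuda-or-cpu fallback is only so the snippet runs anywhere:

import torch
from torch import nn

device = "cuda:0" if torch.cuda.is_available() else "cpu"  # the log shows device=0 (CUDA)

# Frozen base weight vs. trainable LoRA weight, shapes taken from the guards above.
base_w = nn.Parameter(torch.empty(3072, 12288, dtype=torch.bfloat16, device=device), requires_grad=False)
lora_a = nn.Parameter(torch.empty(16, 12288, dtype=torch.bfloat16, device=device))  # requires_grad=True

for name, p in [("base_layer.weight", base_w), ("lora_A.weight", lora_a)]:
    # These are the properties a TENSOR_MATCH guard re-checks on every call.
    print(name, type(p).__name__, p.dtype, p.device, p.requires_grad, tuple(p.shape), p.stride())
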
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=5 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[5] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: 
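
Taken together, the ff_context guards above describe a diffusers-style FeedForward: net['0'] is a GELU projection with approximate == 'tanh' (activations.py:83/88) whose proj is the LoRA-wrapped Linear(3072, 12288), net['1'] is Dropout(p=0.0), and net['2'] is the LoRA-wrapped output Linear whose base weight is [3072, 12288], all driven by the loop at attention.py:1165-1166. A reconstruction sketched from those guards (the LoRA wrappers are omitted and net['2'] is shown as its plain base Linear):

import torch
import torch.nn as nn
import torch.nn.functional as F

class GELUProj(nn.Module):
    # guard: net['0'].approximate == 'tanh'; proj is the LoRA-wrapped Linear(3072, 12288)
    def __init__(self, dim=3072, dim_out=12288, approximate="tanh"):
        super().__init__()
        self.proj = nn.Linear(dim, dim_out)
        self.approximate = approximate

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)                    # activations.py:88
        return F.gelu(hidden_states, approximate=self.approximate)  # activations.py:83

class FeedForwardSketch(nn.Module):
    def __init__(self, dim=3072, inner_dim=12288):
        super().__init__()
        self.net = nn.ModuleList([
            GELUProj(dim, inner_dim),   # net['0']
            nn.Dropout(p=0.0),          # net['1'], guard: p == 0.0
            nn.Linear(inner_dim, dim),  # net['2'] base_layer, weight [3072, 12288] in the log
        ])

    def forward(self, hidden_states):
        for module in self.net:                                     # attention.py:1165
            hidden_states = module(hidden_states)                   # attention.py:1166
        return hidden_states

norm_encoder_hidden_states = torch.randn(1, 512, 3072)
# In the block this is context_ff_output = self.ff_context(norm_encoder_hidden_states)
# (transformer_flux.py:198), which is why this subtree hangs off ff_context.
context_ff_output = FeedForwardSketch()(norm_encoder_hidden_states)
print(context_ff_output.shape)  # torch.Size([1, 512, 3072])
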
list(L['self']._modules['transformer_blocks']._modules.keys())[5] == '5' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
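
The guard subtree above specializes the compiled graph on every attribute the peft LoRA wrapper around transformer_blocks.5.norm1.linear reads at call time: the frozen base_layer weight and bias (TENSOR_MATCH on dtype, device, shape, stride), the rank-16 lora_A/lora_B weights, scaling['default_0'] == 1.0, use_dora and disable_adapters pinned to False, the single active adapter name, and the empty merged_adapters list. The following is a minimal, self-contained sketch of the forward path those guards correspond to, reconstructed only from the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509); the class name LoRALinearSketch and the Identity stand-in for lora_dropout are illustrative, and this is a simplification, not the verbatim peft implementation.

import torch
import torch.nn as nn

class LoRALinearSketch(nn.Module):
    """Illustrative stand-in for the guarded peft lora.Linear; not the real class."""
    def __init__(self, base: nn.Linear, rank: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base
        self.base_layer.requires_grad_(False)          # frozen base weight/bias (TENSOR_MATCH, requires_grad=False)
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base.in_features, rank, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(rank, base.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # stand-in for the configured lora_dropout
        self.scaling = {"default_0": scaling}          # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}           # ID_MATCH against the False singleton
        self.merged_adapters = []                      # LENGTH_CHECK: not merged_adapters
        self._active_adapter = ["default_0"]           # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self._disable_adapters = False                 # ID_MATCH: disable_adapters is False

    def forward(self, x, *args, **kwargs):
        result = self.base_layer(x, *args, **kwargs)             # peft layer.py:497
        if self._disable_adapters:                               # layer.py:488 (merged/unmerge branch elided)
            return result
        for active_adapter in self._active_adapter:              # layer.py:499
            if active_adapter not in self.lora_A.keys():         # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                 # layer.py:503
            dropout = self.lora_dropout[active_adapter]          # layer.py:504
            scaling = self.scaling[active_adapter]               # layer.py:505
            x = x.to(lora_A.weight.dtype)                        # layer.py:506
            if not self.use_dora[active_adapter]:                # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

# The guarded projection maps 3072 -> 18432 with a rank-16 adapter (see the TENSOR_MATCH shapes above).
layer = LoRALinearSketch(nn.Linear(3072, 18432, bias=True), rank=16)
out = layer(torch.randn(2, 3072))
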
source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._forward_pre_hooks, 
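
norm1 itself is the AdaLayerNormZero module referenced by the quoted diffusers lines: the guards fix self.emb to None, the SiLU inplace flag, the LoRA-wrapped linear sketched above, and a LayerNorm with eps == 1e-06, normalized_shape == (3072,) and no affine parameters (weight and bias ID_MATCH against None). Below is a compact sketch of the computation those guards cover, reconstructed from the lines quoted in the comments (diffusers normalization.py:135-139); the six-way chunk of emb is an assumption added only to give the modulation tensors their names, and the class name is illustrative rather than the diffusers implementation.

import torch
import torch.nn as nn

class AdaLayerNormZeroSketch(nn.Module):
    """Illustrative reconstruction of the guarded norm1 module; not the diffusers class."""
    def __init__(self, dim: int = 3072):
        super().__init__()
        self.emb = None                          # ID_MATCH: self.emb is None, so the embedder branch is skipped
        self.silu = nn.SiLU(inplace=False)       # ID_MATCH on silu.inplace
        self.linear = nn.Linear(dim, 6 * dim)    # 3072 -> 18432, the LoRA-wrapped projection guarded above
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # weight/bias parameters are None

    def forward(self, x, emb):
        emb = self.linear(self.silu(emb))        # normalization.py:137
        # assumed: split into six modulation tensors (shift/scale/gate for attention and MLP)
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]   # normalization.py:139
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp

# Mirrors the call site quoted in the guards:
# norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
norm1 = AdaLayerNormZeroSketch(3072)
x, gate_msa, shift_mlp, scale_mlp, gate_mlp = norm1(torch.randn(2, 8, 3072), emb=torch.randn(2, 3072))
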
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters) == 1 # if 
self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_pre_hooks # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in 
__getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'], 
244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] 
# peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].processor, 139846066998768) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # 
nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward 
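[editor's note] For readability, the module layout that this stretch of the guard tree is walking can be summarized as a small sketch. This is an illustrative reconstruction, not the actual diffusers FeedForward / peft LoraLayer code: it only mirrors what the guards above assert — net['0'] is a tanh-approximated GELU projection whose proj is a LoRA-wrapped Linear 3072 -> 12288 (rank 16, scaling 1.0, single adapter 'default_0', use_dora False), net['1'] is Dropout(p=0.0), and net['2'] is a LoRA-wrapped Linear 12288 -> 3072 with the same adapter. The class names (LoRALinearSketch, GELUProjSketch) and the Identity lora_dropout are assumptions made for illustration; dtype/device details from the TENSOR_MATCH guards (bfloat16 on CUDA, frozen base weights, trainable LoRA weights) are omitted.

```python
# Illustrative sketch only -- assumed names, not the diffusers/peft implementation.
import torch
import torch.nn as nn
import torch.nn.functional as F


class LoRALinearSketch(nn.Module):
    # Stand-in for a peft LoRA-wrapped nn.Linear: base layer plus rank-16
    # lora_A/lora_B branches under the single adapter name "default_0".
    def __init__(self, in_features, out_features, rank=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, bias=True)
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, rank, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(rank, out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # assumed inactive dropout
        self.scaling = {"default_0": scaling}   # matches EQUALS_MATCH ... scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}    # matches the use_dora ID_MATCH guards

    def forward(self, x):
        result = self.base_layer(x)
        for name in ("default_0",):             # single active adapter, as guarded
            lora_A, lora_B = self.lora_A[name], self.lora_B[name]
            result = result + lora_B(lora_A(self.lora_dropout[name](x))) * self.scaling[name]
        return result


class GELUProjSketch(nn.Module):
    # net['0'] in the guard tree: projection followed by GELU with approximate == 'tanh'.
    def __init__(self, dim=3072, inner_dim=12288):
        super().__init__()
        self.proj = LoRALinearSketch(dim, inner_dim)

    def forward(self, x):
        return F.gelu(self.proj(x), approximate="tanh")


# The ff.net ModuleList the guards iterate over, with keys '0', '1', '2'.
ff_net = nn.ModuleList([
    GELUProjSketch(3072, 12288),    # net['0']: base weight [12288, 3072], LoRA A [16, 3072], B [12288, 16]
    nn.Dropout(p=0.0),              # net['1']: p == 0.0, inplace False, training False
    LoRALinearSketch(12288, 3072),  # net['2']: base weight [3072, 12288], LoRA A [16, 12288], B [3072, 16]
])

x = torch.randn(1, 4096, 3072)
for module in ff_net:               # mirrors "for module in self.net" at attention.py:1165
    x = module(x)
```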
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = 
self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for 
module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=6 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[6] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[6] == '6' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # 
nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # 
diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if 
self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'], 244529984) # 
encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 
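Taken together, the add_k_proj guards pin a single active adapter ('default_0'), use_dora False, no merged adapters, adapters enabled, scaling 1.0, and bf16 rank-16 A/B factors around a frozen 3072x3072 base projection. A self-contained functional sketch of that guarded path, condensed from the peft/tuners/lora/layer.py lines quoted in the guard comments (result = self.base_layer(x, *args, **kwargs), then result + lora_B(lora_A(dropout(x))) * scaling), assuming a single adapter and no DoRA:

import torch
import torch.nn.functional as F

def lora_linear_forward(x, base_w, base_b, lora_A_w, lora_B_w, scaling=1.0, p=0.0):
    # Frozen base projection (layer.py:497, which bottoms out in F.linear,
    # nn/modules/linear.py:125 as quoted by the guards).
    result = F.linear(x, base_w, base_b)
    # Single-adapter path: dropout -> lora_A -> lora_B, scaled and added (layer.py:504-509).
    h = F.dropout(x.to(lora_A_w.dtype), p=p)
    return result + F.linear(F.linear(h, lora_A_w), lora_B_w) * scaling

# Shapes and dtype as recorded by the guards: hidden size 3072, rank 16, bfloat16.
x = torch.randn(2, 3072, dtype=torch.bfloat16)
base_w = torch.randn(3072, 3072, dtype=torch.bfloat16)
base_b = torch.randn(3072, dtype=torch.bfloat16)
A = torch.randn(16, 3072, dtype=torch.bfloat16)
B = torch.zeros(3072, 16, dtype=torch.bfloat16)
print(lora_linear_forward(x, base_w, base_b, A, B).shape)  # torch.Size([2, 3072])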
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
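The add_v_proj guards repeat the same pattern: a frozen [3072, 3072] bf16 base weight (requires_grad=False) next to trainable [16, 3072] and [3072, 16] LoRA factors, i.e. rank 16 at hidden size 3072. A quick back-of-the-envelope on what that rank costs per wrapped projection:

d_model, rank = 3072, 16                       # sizes taken from the TENSOR_MATCH guards above
base_params = d_model * d_model + d_model      # frozen weight + bias = 9,440,256
lora_params = rank * d_model + d_model * rank  # lora_A + lora_B       =    98,304
print(f"trainable fraction per projection: {lora_params / (base_params + lora_params):.2%}")  # ~1.03%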
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = 
attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 
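The guard tree reaches each LoRA-wrapped projection through explicit _modules paths (transformer_blocks.6.attn.add_k_proj / add_v_proj / add_q_proj, each carrying a 'default_0' adapter with base_layer, lora_A, lora_B, lora_dropout and scaling entries). A small stand-in sketch (a hypothetical TinyLoraLinear, not the real FluxTransformer2DModel or PEFT class) showing how that per-adapter layout can be enumerated with named_modules():

import torch
from torch import nn

# Stand-in for one guarded projection: a frozen base Linear plus per-adapter
# lora_A/lora_B ModuleDicts and a scaling dict, mirroring what the guards describe.
class TinyLoraLinear(nn.Module):
    def __init__(self, d_model=3072, rank=16):
        super().__init__()
        self.base_layer = nn.Linear(d_model, d_model, dtype=torch.bfloat16)
        self.base_layer.requires_grad_(False)
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(d_model, rank, bias=False, dtype=torch.bfloat16)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(rank, d_model, bias=False, dtype=torch.bfloat16)})
        self.scaling = {"default_0": 1.0}

attn = nn.ModuleDict({
    "add_q_proj": TinyLoraLinear(),
    "add_k_proj": TinyLoraLinear(),
    "add_v_proj": TinyLoraLinear(),
})
for name, module in attn.named_modules():
    if hasattr(module, "lora_A") and "default_0" in module.lora_A:
        print(name,
              tuple(module.lora_A["default_0"].weight.shape),   # (16, 3072)
              tuple(module.lora_B["default_0"].weight.shape),   # (3072, 16)
              module.scaling["default_0"])                      # 1.0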
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
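The TENSOR_MATCH entries around this point spell out everything the guard checks about a parameter: its class (Parameter), dispatch key set, dtype (torch.bfloat16 here), device index, requires_grad, size and stride. Loading the same adapter in another dtype, on another device, or with another rank would change one of these fields and invalidate the compiled graph. A hand-written paraphrase of what the printed fields amount to, under the assumption that it only needs to mirror the log output (it is not the real check_tensor guard, which is implemented natively):

    import torch

    def matches_guarded_base_weight(t: torch.Tensor) -> bool:
        # Mirrors the fields printed for to_add_out.base_layer.weight in the TENSOR_MATCH above.
        return (
            isinstance(t, torch.nn.Parameter)
            and t.dtype == torch.bfloat16
            and t.device.type == "cuda"
            and t.device.index == 0
            and t.requires_grad is False
            and tuple(t.shape) == (3072, 3072)
            and t.stride() == (3072, 1)
        )

For example, a rank-32 adapter would turn the size [16, 3072] recorded for the lora_A weight further below into [32, 3072], so the existing graph could not be reused.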
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
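Taken together, the to_add_out guards describe a PEFT LoRA-wrapped Linear: a 3072x3072 bfloat16 base_layer, an identity-like lora_dropout, bias-free rank-16 lora_A/lora_B projections (the ID_MATCH guards on their bias entries are consistent with bias=None), scaling['default_0'] == 1.0 and use_dora == False, with the adapter bookkeeping guards (active_adapter, merged_adapters, disable_adapters) following just below. The quoted comments from peft/tuners/lora/layer.py:488-509 give the forward path being traced; a minimal stand-in that follows those quoted lines is sketched here (an illustration, not PEFT's actual LoraLayer implementation):

    import torch
    import torch.nn as nn

    class ToyLoraLinear(nn.Module):
        # Shapes taken from the guards: 3072x3072 base layer, rank-16 adapter, scaling 1.0.
        def __init__(self, features=3072, rank=16, scaling=1.0, p=0.0):
            super().__init__()
            self.base_layer = nn.Linear(features, features)
            self.lora_dropout = nn.Dropout(p)
            self.lora_A = nn.Linear(features, rank, bias=False)
            self.lora_B = nn.Linear(rank, features, bias=False)
            self.scaling = scaling

        def forward(self, x):
            # result = self.base_layer(x, *args, **kwargs)             (layer.py:497)
            result = self.base_layer(x)
            # x = x.to(lora_A.weight.dtype)                            (layer.py:506)
            x = x.to(self.lora_A.weight.dtype)
            # result = result + lora_B(lora_A(dropout(x))) * scaling   (layer.py:509)
            return result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling

Every attribute read on that path (the sub-module dictionaries, scaling, use_dora, the adapter lists) shows up as its own guard, which is why a single LoRA linear contributes this many entries to the tree.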
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].eps, 
accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].processor, 139846062908992) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 
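The norm_added_q / norm_added_k guards pin an RMS-style normalization: eps == 1e-06, a single bfloat16 weight of size [128] (the per-head channel dimension), and the quoted source line hidden_states = hidden_states * torch.rsqrt(variance + self.eps) from diffusers/models/normalization.py:428. A minimal sketch of a norm with that shape, under those assumptions (not the exact diffusers RMSNorm class):

    import torch
    import torch.nn as nn

    class ToyRMSNorm(nn.Module):
        def __init__(self, dim=128, eps=1e-6):
            super().__init__()
            self.eps = eps
            self.weight = nn.Parameter(torch.ones(dim))  # size [128], as in the TENSOR_MATCH above

        def forward(self, hidden_states):
            # hidden_states = hidden_states * torch.rsqrt(variance + self.eps)   (normalization.py:428)
            variance = hidden_states.pow(2).mean(-1, keepdim=True)
            hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
            # "if self.weight is not None:" (normalization.py:430) -- apply the learned scale
            return hidden_states * self.weight

The EQUALS_MATCH on eps and the TENSOR_MATCH on the weight are value guards: swapping in a norm with a different eps or a differently shaped weight would specialize a new graph.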
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
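For norm2 the guards record eps == 1e-06, a normalized_shape tuple of length 1 with normalized_shape[0] == 3072, and ID_MATCH guards on both _parameters['weight'] and _parameters['bias'] against a fixed object id, which is what an affine-free LayerNorm looks like (both entries are None). Under that reading, and matching the quoted call site norm_hidden_states = self.norm2(hidden_states) at transformer_flux.py:182, the equivalent module would be constructed roughly as follows (a sketch inferred from the guards, not the actual FLUX block definition):

    import torch.nn as nn

    # eps and normalized_shape come from the EQUALS_MATCH / LENGTH_CHECK guards above;
    # elementwise_affine=False is an inference from the weight/bias ID_MATCH guards.
    norm2 = nn.LayerNorm(3072, eps=1e-6, elementwise_affine=False)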
| | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
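The ff guards outline a diffusers-style FeedForward: self.net is a container of length 3, iterated in order (attention.py:1165-1166), and its first entry wraps a LoRA-adapted proj Linear that expands 3072 to 12288 (weight [12288, 3072], bias [12288], both bfloat16, per the TENSOR_MATCH entries above). Only net[0]'s proj is visible in this part of the dump, so the remaining two entries below are assumptions (a dropout and a down-projection back to 3072, the usual layout); the sketch is illustrative, not the actual diffusers FeedForward:

    import torch
    import torch.nn as nn

    class ToyFeedForward(nn.Module):
        def __init__(self, dim=3072, inner=12288):
            super().__init__()
            self.net = nn.ModuleList([
                # net[0]: projection followed by its activation (activations.py:88 applies self.proj first)
                nn.Sequential(nn.Linear(dim, inner), nn.GELU(approximate="tanh")),
                nn.Dropout(0.0),          # assumed
                nn.Linear(inner, dim),    # assumed down-projection back to the model width
            ])

        def forward(self, hidden_states):
            # for module in self.net: hidden_states = module(hidden_states)   (attention.py:1165-1166)
            for module in self.net:
                hidden_states = module(hidden_states)
            return hidden_states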
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: 
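
For norm2_context the guards pin eps == 1e-06, normalized_shape == (3072,), and ID-match both entries of _parameters ('weight' and 'bias') to the same object id, which is most plausibly id(None), i.e. a LayerNorm built with elementwise_affine=False. Under that assumption, the call the graph specializes on (nn/modules/normalization.py:217-218 in the frame comments) reduces to the following sketch; batch and sequence sizes are placeholders, only the last dimension (3072) is guarded:

import torch
import torch.nn.functional as F

# Assumed reconstruction of norm2_context from the guards above.
norm2_context = torch.nn.LayerNorm(3072, eps=1e-6, elementwise_affine=False)

encoder_hidden_states = torch.randn(1, 512, 3072)
out = F.layer_norm(
    encoder_hidden_states,
    norm2_context.normalized_shape,   # (3072,)
    norm2_context.weight,             # None under the elementwise_affine=False assumption
    norm2_context.bias,               # None
    norm2_context.eps,                # 1e-6
)
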
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
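
The TENSOR_MATCH entries above are the heavyweight checks in this tree: for the base Linear of ff_context.net[0].proj they record a frozen bfloat16 CUDA parameter of size [12288, 3072] with stride [3072, 1] (and [12288] for the bias), plus NO_TENSOR_ALIASING across guarded tensors. The 3072 -> 12288 shape is the 4x expansion of the context FeedForward's first projection; further down, net[2]'s base layer maps 12288 back to 3072. A rough Python-level approximation of what such a guard asserts about the weight (Dynamo's real check is a single fused C++ guard, not this) is:

import torch

def weight_matches_guard(t: torch.Tensor) -> bool:
    """Illustrative approximation of the TENSOR_MATCH on
    ff_context.net[0].proj.base_layer.weight; not Dynamo's implementation."""
    return (
        isinstance(t, torch.nn.Parameter)
        and t.dtype == torch.bfloat16
        and t.device == torch.device("cuda", 0)
        and t.requires_grad is False
        and tuple(t.shape) == (12288, 3072)
        and tuple(t.stride()) == (3072, 1)
    )
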
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
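
Taken together, the lora_A / lora_B guards for this projection describe a rank-16 adapter: A is a trainable bfloat16 [16, 3072] weight, B is [12288, 16], both bias-free (their bias entries ID-match what is presumably None), and scaling['default_0'] == 1.0. Since PEFT computes scaling as lora_alpha / r (absent rslora), this is consistent with r == lora_alpha == 16; that is an inference, not something the log states. A hedged sketch of a config that would produce adapters shaped like this (the target_modules list is illustrative, not read from the log):

from peft import LoraConfig

lora_config = LoraConfig(
    r=16,             # matches the [16, 3072] / [12288, 16] A/B shapes in the guards
    lora_alpha=16,    # assumed, so that scaling == lora_alpha / r == 1.0
    lora_dropout=0.0, # assumed; the guards do not pin the lora_dropout module type
    bias="none",      # consistent with the ID_MATCH on the A/B bias entries
    target_modules=["proj", "net.2"],  # hypothetical; the log only shows these wrapped here
)
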
len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, 
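
A large share of the entries in this tree are the hook guards repeated for every module: DictSubclassGuardManager over _forward_hooks / _forward_pre_hooks plus DICT_LENGTH checks that _backward_hooks and _backward_pre_hooks are empty. They let the compiled graph skip nn.Module hook dispatch; registering any hook on a guarded module after compilation changes a guarded dict and is expected to trigger a recompile on the next call. A toy, self-contained demonstration of that mechanism (not the Flux model from this log):

import torch

class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(8, 8)

    def forward(self, x):
        return self.proj(x)

toy = Toy()
compiled = torch.compile(toy)
x = torch.randn(2, 8)
compiled(x)                                       # first call installs guards, incl. empty-hook checks
toy.proj.register_forward_hook(lambda m, a, o: o) # hook dict is no longer empty
compiled(x)                                       # guard miss on the hook state -> expected recompile
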
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # 
return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # 
peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if 
isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=7 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[7] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[7] == '7' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 
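
Note on the LoRA guards above: the guard comments quote peft/tuners/lora/layer.py:488-509, i.e. the eager LoRA Linear forward that Dynamo traced, in the branch where adapters are enabled, nothing is merged, and use_dora is False. The sketch below is a hypothetical, reduced stand-in for that layer (not PEFT's actual class), kept only to show why each attribute touched here (base_layer, lora_A/lora_B, lora_dropout, scaling, use_dora, _active_adapter, merged_adapters, _disable_adapters) shows up as its own guard:

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Hypothetical stand-in for the PEFT LoRA Linear, reduced to the branch
        # the guards cover: adapters enabled, nothing merged, use_dora False,
        # a single active adapter named "default_0".
        def __init__(self, base: nn.Linear, r: int = 16, alpha: int = 16, adapter: str = "default_0"):
            super().__init__()
            self.base_layer = base
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base.out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # stand-in for the configured dropout
            self.scaling = {adapter: alpha / r}      # == 1.0 in the guarded model
            self.use_dora = {adapter: False}
            self._active_adapter = [adapter]
            self.merged_adapters = []
            self._disable_adapters = False

        def forward(self, x):
            result = self.base_layer(x)                              # layer.py:497
            for name in self._active_adapter:                        # layer.py:499
                lora_A = self.lora_A[name]
                lora_B = self.lora_B[name]                           # layer.py:503
                dropout = self.lora_dropout[name]                    # layer.py:504
                scaling = self.scaling[name]                         # layer.py:505
                x = x.to(lora_A.weight.dtype)                        # layer.py:506
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result

    base = nn.Linear(3072, 18432)
    layer = LoraLinearSketch(base)
    out = layer(torch.randn(2, 3072))

Every dictionary and list read in that forward picks up a DICT_LENGTH, LENGTH_CHECK, EQUALS_MATCH or ID_MATCH guard, which is why this block of checks repeats for every LoRA-wrapped linear in the model.
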
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 
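
Note on the norm1 subtree: the guard comments quote diffusers/src/diffusers/models/normalization.py:135-139, i.e. AdaLayerNormZero, and the guarded shapes pin it down to SiLU → Linear(3072 → 18432) → LayerNorm(3072, eps=1e-6) with no affine parameters (the weight/bias ID_MATCH values are consistent with None). The sketch below is a minimal reconstruction under those shapes; the chunk into six modulation tensors is inferred from 18432 = 6 × 3072 and from the five values unpacked at transformer_flux.py:165, not shown directly in the guards, and the LoRA wrapper around self.linear from the previous note is omitted:

    import torch
    import torch.nn as nn

    class AdaLayerNormZeroSketch(nn.Module):
        # Hypothetical reduction of the guarded norm1 module: dim = 3072,
        # linear output = 6 * dim, LayerNorm without affine parameters.
        def __init__(self, dim: int = 3072):
            super().__init__()
            self.silu = nn.SiLU()
            self.linear = nn.Linear(dim, 6 * dim)    # base weight [18432, 3072], as guarded
            self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)

        def forward(self, x, emb):
            emb = self.linear(self.silu(emb))        # normalization.py:137 (quoted in the guards)
            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
            x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # normalization.py:139
            return x, gate_msa, shift_mlp, scale_mlp, gate_mlp

    norm1 = AdaLayerNormZeroSketch()
    x, emb = torch.randn(1, 8, 3072), torch.randn(1, 3072)
    out, gate_msa, shift_mlp, scale_mlp, gate_mlp = norm1(x, emb)
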
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
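
Note on the DICT_CONTAINS guards: for every module it inlines, Dynamo checks not ___dict_contains('forward', <module>.__dict__), i.e. that forward has not been overridden on the instance itself, since an instance attribute would shadow the class method and change what should have been traced. A tiny illustration, with a throwaway module, of what would invalidate such a guard:

    import types
    import torch.nn as nn

    m = nn.SiLU()
    print('forward' in m.__dict__)        # False -> the DICT_CONTAINS guard above holds

    # Assigning an instance-level forward puts 'forward' into m.__dict__,
    # which would fail that guard and force a recompile on the next call.
    m.forward = types.MethodType(lambda self, x: x * 0, m)
    print('forward' in m.__dict__)        # True
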
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, 
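
Note on the TENSOR_MATCH guards: check_tensor pins the parameter's Python type, dispatch keys, dtype (bfloat16), device (cuda:0), requires_grad, size and stride, so casting the frozen base weight ([18432, 3072], requires_grad=False), moving it to another device, or toggling requires_grad fails the guard and recompiles the frame. A minimal stand-alone illustration of that behaviour; plain CPU float tensors stand in for the real bfloat16 CUDA parameters, and the dtype change is only there to force a guard miss:

    import torch

    @torch.compile
    def matvec(w, x):
        return x @ w.t()

    # Sizes taken from the guarded base_layer weight above.
    w = torch.randn(18432, 3072)
    x = torch.randn(1, 3072)
    matvec(w, x)                       # first call: compile + record tensor guards on w and x
    matvec(w.double(), x.double())     # dtype change fails the TENSOR_MATCH-style guard -> recompile
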
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # 
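
Note on the adapter hyperparameters implied above: lora_A weight [16, 3072] and lora_B weight [18432, 16] fix the rank at r = 16, and the EQUALS_MATCH on scaling['default_0'] == 1.0, combined with PEFT's usual scaling = lora_alpha / r (assuming use_rslora=False), implies lora_alpha = 16. A hypothetical LoraConfig consistent with those guards; the target_modules list is an assumption for illustration and is not visible in this log:

    from peft import LoraConfig

    # Hypothetical config matching the guarded shapes: r = 16, lora_alpha = 16
    # => scaling = lora_alpha / r = 1.0, exactly the EQUALS_MATCH value above.
    config = LoraConfig(
        r=16,
        lora_alpha=16,
        lora_dropout=0.0,
        bias="none",
        target_modules=["norm1.linear", "norm1_context.linear"],  # assumption, not from the log
    )
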
peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].merged_adapters, 
accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].__dict__) # x = 
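
Note on the adapter-state guards: _active_adapter[0] == 'default_0', the use_dora['default_0'] object-id check (consistent with False), the empty merged_adapters list, and the _disable_adapters check (consistent with False) are all recorded as exact-value guards, so switching, merging, or disabling adapters after compilation misses them and recompiles the frame on the next call. A toy module (not the real PEFT layer) showing the same effect:

    import torch
    import torch.nn as nn

    class Toy(nn.Module):
        # Hypothetical stand-in mirroring the guarded adapter-state attribute.
        def __init__(self):
            super().__init__()
            self._active_adapter = ["default_0"]
            self.lin = nn.Linear(4, 4)

        def forward(self, x):
            if self._active_adapter[0] == "default_0":
                return self.lin(x)
            return x

    toy = Toy()
    compiled = torch.compile(toy)
    x = torch.randn(2, 4)
    compiled(x)                          # compiles; guards record _active_adapter[0] == 'default_0'
    toy._active_adapter[0] = "other"     # analogous to switching PEFT adapters
    compiled(x)                          # guard miss -> recompile (visible with TORCH_LOGS=recompiles)
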
self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'], 
accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, 
size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], 244529984) # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_pre_hooks # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in 
__getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = 
attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
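Note: the TENSOR_MATCH guards in this stretch pin down each parameter's metadata exactly: dtype torch.bfloat16, device index 0 (CUDA), shape, stride, and requires_grad. For add_v_proj that means a frozen base weight of size [3072, 3072] with bias [3072], plus trainable lora_A/lora_B weights of size [16, 3072] and [3072, 16], i.e. a rank-16 LoRA on a 3072-wide projection. The snippet below is purely illustrative: it constructs parameters with the same guarded properties, assumes a CUDA device as the log does, and takes nothing from the actual checkpoint.

    import torch
    import torch.nn as nn

    device = "cuda:0"  # the guards record device=0 and a CUDA dispatch key set

    # Frozen base projection (requires_grad=False in the guards)
    base_weight = nn.Parameter(torch.empty(3072, 3072, dtype=torch.bfloat16, device=device), requires_grad=False)
    base_bias = nn.Parameter(torch.empty(3072, dtype=torch.bfloat16, device=device), requires_grad=False)

    # Trainable rank-16 LoRA factors (requires_grad=True in the guards)
    lora_A_weight = nn.Parameter(torch.empty(16, 3072, dtype=torch.bfloat16, device=device))
    lora_B_weight = nn.Parameter(torch.empty(3072, 16, dtype=torch.bfloat16, device=device))

    # Contiguous layouts match the guarded strides: [3072, 1], [1], [3072, 1], [16, 1]
    assert base_weight.stride() == (3072, 1)
    assert lora_A_weight.stride() == (3072, 1) and lora_B_weight.stride() == (16, 1)

A change in any of these properties between calls (different dtype, device, LoRA rank, or a parameter that becomes trainable) fails the corresponding TENSOR_MATCH and forces Dynamo to recompile.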
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 
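Note: adapter bookkeeping is guarded just as tightly as the tensors. The DictGuardManager here checks that the lora_A ModuleDict holds exactly one key, 'default_0', and neighbouring guards fix _active_adapter to ['default_0'], merged_adapters to an empty list, and the use_dora / _disable_adapters flags to False. The function below simply restates those guard expressions in plain Python; it is not a Dynamo API, only a sketch of what the guards assert about a PEFT LoRA layer.

    def lora_guards_still_hold(layer) -> bool:
        """Plain-Python restatement of the adapter-state guards shown above."""
        return (
            list(layer.lora_A.keys()) == ["default_0"]       # DictGuardManager key EQUALS_MATCH
            and layer._active_adapter == ["default_0"]        # LENGTH_CHECK + EQUALS_MATCH on index 0
            and not layer.merged_adapters                     # LENGTH_CHECK: not merged_adapters
            and layer.use_dora.get("default_0") is False      # ID_MATCH against the False singleton
            and layer._disable_adapters is False              # ID_MATCH (adapters not disabled)
        )

Loading a second adapter, switching the active adapter, merging or fusing the LoRA weights, or disabling adapters would make one of these expressions false, so the cached graph is skipped and the frame is recompiled.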
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # 
nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = 
attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].processor, 139846062911056) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net'], 96863792) # for module in self.net: # 
diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward 
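The guard entries in this trace repeatedly cite peft/tuners/lora/layer.py:488-509. Below is a minimal sketch of that forward path, reconstructed only from the source lines quoted inline in the guard comments; the class name LoraLinearSketch, its constructor signature, and the plain-attribute versions of active_adapters/disable_adapters are illustrative assumptions, not PEFT's actual API. It illustrates why Dynamo installs this shape of guard tree: every Python-level attribute the forward touches (the active_adapters list, the scaling and use_dora dicts, the lora_A/lora_B ModuleDicts and their weights) becomes a TYPE_MATCH / LENGTH_CHECK / EQUALS_MATCH / TENSOR_MATCH entry, repeated once per LoRA-wrapped Linear in the model.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Illustrative stand-in for a PEFT LoRA-wrapped Linear; mirrors the control
        # flow quoted in the guard comments (peft/tuners/lora/layer.py:488-509).
        def __init__(self, base: nn.Linear, r: int = 16, adapter: str = "default_0"):
            super().__init__()
            self.base_layer = base                     # base weight/bias -> TENSOR_MATCH guards
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base.out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
            self.scaling = {adapter: 1.0}              # EQUALS_MATCH: scaling['default_0'] == 1.0
            self.use_dora = {adapter: False}           # ID_MATCH on use_dora['default_0']
            self.active_adapters = [adapter]           # LENGTH_CHECK == 1, EQUALS_MATCH 'default_0'
            self.disable_adapters = False              # ID_MATCH on _disable_adapters

        def forward(self, x):
            if self.disable_adapters:                            # layer.py:488
                return self.base_layer(x)
            result = self.base_layer(x)                          # layer.py:497
            for active_adapter in self.active_adapters:          # layer.py:499
                if active_adapter not in self.lora_A.keys():     # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]             # layer.py:503
                dropout = self.lora_dropout[active_adapter]      # layer.py:504
                scaling = self.scaling[active_adapter]           # layer.py:505
                x = x.to(lora_A.weight.dtype)                    # layer.py:506
                if not self.use_dora[active_adapter]:            # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result

    # Usage sketch: one 3072 -> 12288 projection with the shapes guarded above
    # (the logged model holds these weights in bfloat16; float32 keeps the sketch portable).
    proj = LoraLinearSketch(nn.Linear(3072, 12288), r=16)
    out = proj(torch.randn(2, 3072))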
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result 
= result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 
accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=8 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[8] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules.keys())[8] == '8' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters) == 1 # if 
self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
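[editor's note] The TENSOR_MATCH entries just above (base_layer weight [3072, 3072] and bias [3072], bfloat16 on device 0, requires_grad=False) show what check_tensor pins for every parameter the traced code touches: class, dispatch keys, dtype, device, requires_grad, size and stride. Changing any of those properties on a guarded tensor invalidates the cache entry and forces a recompile. A small illustration is sketched below; it runs in fp32 on CPU rather than bfloat16 on CUDA, so it only mirrors the mechanism, not the exact guards in the dump.

import torch

lin = torch.nn.Linear(3072, 3072)    # stand-in for one guarded base_layer
compiled = torch.compile(lin)

x = torch.randn(1, 3072)
compiled(x)                          # compile #1: guards pin dtype/device/shape/
                                     # stride/requires_grad of lin.weight and lin.bias

lin.weight.requires_grad_(False)     # flip one guarded property
compiled(x)                          # TENSOR_MATCH fails, so a recompile is logged
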
[0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
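[editor's note] Taken together, the to_q guards above specialize on a single active adapter ('default_0') with scaling 1.0, a 3072-wide base Linear, rank-16 lora_A/lora_B Linears whose bias entries are ID_MATCHed (consistent with bias being None), and use_dora=False. The branch of the PEFT forward they correspond to, reconstructed only from the source lines quoted in the guard comments (peft/tuners/lora/layer.py, around lines 497-509), amounts to the sketch below; it is a paraphrase, not the actual PEFT implementation, and nn.Identity stands in for the lora_dropout module.

import torch

def lora_linear_forward(base_layer, lora_A, lora_B, dropout, scaling, x):
    # Condensed paraphrase of the guarded branch for one active adapter
    # with use_dora=False (peft/tuners/lora/layer.py:497-509, as cited above).
    result = base_layer(x)                                   # layer.py:497
    x = x.to(lora_A.weight.dtype)                            # layer.py:506
    result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
    return result

# Shapes and values taken from the guards: 3072-wide base layer, rank 16,
# no bias on the adapter Linears, scaling == 1.0.
base = torch.nn.Linear(3072, 3072)
lora_A = torch.nn.Linear(3072, 16, bias=False)   # weight [16, 3072]
lora_B = torch.nn.Linear(16, 3072, bias=False)   # weight [3072, 16]
out = lora_linear_forward(base, lora_A, lora_B, torch.nn.Identity(), 1.0,
                          torch.randn(2, 3072))
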
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_pre_hooks # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in 
__getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
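[editor's note] The to_k subtree in the last several entries mirrors the to_q subtree guard for guard, and the same shape repeats below for to_v and again for every LoRA-wrapped projection in every block, which is what makes this dump so long. If the adapter is fixed at inference time, one way to shrink the guarded surface is to merge the LoRA deltas into the base weights before compiling, so the traced forward no longer walks the adapter bookkeeping (active_adapters, scaling, use_dora, merged_adapters). The sketch below is an assumption, not a prescription: pipe stands for an already-built diffusers pipeline with this LoRA loaded, fuse_lora is the diffusers-side entry point on recent releases, and merge_and_unload is the analogous PEFT call when working with a PeftModel directly; check both against your installed versions.

import torch

# Hypothetical: 'pipe' is an existing diffusers pipeline with the LoRA loaded.
pipe.fuse_lora(lora_scale=1.0)                       # fold scaling * (lora_B @ lora_A) into the base weights
pipe.transformer = torch.compile(pipe.transformer)   # compile the merged transformer
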
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
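[editor's note] For context, the to_q/to_k/to_v and norm_k guards in this stretch all hang off the same few lines of the diffusers Flux attention processor that the guard comments cite (attention_processor.py:1716-1718 and 1730). A condensed paraphrase is below; heads=24 and head_dim=128 are assumptions chosen to be consistent with the 3072-wide projections and the 128-element norm weight in the TENSOR_MATCH guards, and the real processor also handles the encoder stream, rotary embeddings, and the query norm, all omitted here.

import torch
from types import SimpleNamespace

def flux_qkv_sketch(attn, hidden_states, heads=24, head_dim=128):
    # Paraphrase of the cited lines; not the actual diffusers implementation.
    b = hidden_states.shape[0]
    query = attn.to_q(hidden_states)                         # attention_processor.py:1716
    key = attn.to_k(hidden_states)                           # attention_processor.py:1717
    value = attn.to_v(hidden_states)                         # attention_processor.py:1718
    key = key.view(b, -1, heads, head_dim).transpose(1, 2)   # per-head layout
    key = attn.norm_k(key)                                   # attention_processor.py:1730
    return query, key, value

# Stand-in attention module with the guarded shapes (LayerNorm replaces the
# model's RMSNorm; sequence length 512 is arbitrary).
attn = SimpleNamespace(
    to_q=torch.nn.Linear(3072, 3072),
    to_k=torch.nn.Linear(3072, 3072),
    to_v=torch.nn.Linear(3072, 3072),
    norm_k=torch.nn.LayerNorm(128),
)
q, k, v = flux_qkv_sketch(attn, torch.randn(1, 512, 3072))
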
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'], 
244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] 
# peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].processor, 139846069991072) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # 
nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward 
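[editor's note] The guards in this stretch of the tree keep citing the same few source lines from peft/tuners/lora/layer.py (488, 497-509). As a reading aid, the following is a rough, self-contained sketch of that control flow, reconstructed only from the fragments quoted in the guard comments above; the class name ToyLoraLinear and its attribute layout are illustrative assumptions, not the actual peft.tuners.lora.layer.Linear implementation.

import torch
import torch.nn as nn

class ToyLoraLinear(nn.Module):
    # Minimal stand-in for a LoRA-wrapped Linear, shaped after the lines the guards quote.
    # Constants mirror the guarded values in the log: rank 16, scaling 1.0, dropout p=0.0,
    # adapter key 'default_0', use_dora=False, disable_adapters=False, LoRA layers without bias.
    def __init__(self, base: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Dropout(p=0.0)})
        self.scaling = {adapter: 1.0}        # guarded via EQUALS_MATCH == 1.0
        self.use_dora = {adapter: False}     # guarded via ID_MATCH on False
        self.active_adapters = [adapter]     # guarded via EQUALS_MATCH == 'default_0'
        self.disable_adapters = False        # guarded via ID_MATCH on False

    def forward(self, x, *args, **kwargs):
        if self.disable_adapters:                                # layer.py:488
            return self.base_layer(x, *args, **kwargs)
        result = self.base_layer(x, *args, **kwargs)             # layer.py:497
        for active_adapter in self.active_adapters:              # layer.py:499
            if active_adapter not in self.lora_A.keys():         # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                 # layer.py:503
            dropout = self.lora_dropout[active_adapter]          # layer.py:504
            scaling = self.scaling[active_adapter]               # layer.py:505
            x = x.to(lora_A.weight.dtype)                        # layer.py:506
            if not self.use_dora[active_adapter]:                # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Every attribute touched on this path (scaling['default_0'], use_dora, the active-adapter list, lora_A/lora_B weights and their None biases, the dropout's p/inplace/training flags, and the base layer's weight/bias tensors) shows up as its own TYPE_MATCH, EQUALS_MATCH, ID_MATCH, DICT_LENGTH, or TENSOR_MATCH entry in the tree, which is why the guard count grows with each LoRA-wrapped Linear in the transformer blocks.
[end editor's note]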
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = 
self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for 
module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=9 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[9] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[9] == '9' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # 
nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # 
diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 
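Note on the guards above: everything the PEFT LoRA wrapper reads in its forward pass (the active adapter name, the per-adapter scaling, the use_dora flag, the merged/disabled state, and the lora_A / lora_B / base_layer parameters) becomes its own guard node, which is why a single LoRA-wrapped Linear contributes dozens of entries to this tree. Below is a minimal sketch of the code path those guard comments quote from peft/tuners/lora/layer.py (lines 488-509); it is an illustration, not the actual PEFT class, and the name SimpleLoraLinear is made up for the example.

```python
# Sketch of the LoRA forward path the guards above specialize on.
# Inline comments quote the source lines cited in the guard comments.
import torch
import torch.nn as nn

class SimpleLoraLinear(nn.Module):  # hypothetical stand-in for peft.tuners.lora.Linear
    def __init__(self, base: nn.Linear, r: int = 16, alpha: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # dropout disabled in this trace
        self.scaling = {adapter: alpha / r}   # EQUALS_MATCH above checks scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}      # ID_MATCH above checks this flag against a singleton
        self._active_adapter = [adapter]      # EQUALS_MATCH above checks _active_adapter[0] == 'default_0'
        self.merged_adapters = []             # LENGTH_CHECK above checks this list is empty
        self._disable_adapters = False        # also guarded via ID_MATCH

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = self.base_layer(x)           # result = self.base_layer(x, *args, **kwargs)
        for active_adapter in self._active_adapter:
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x = x.to(lora_A.weight.dtype)     # x = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:
                # result = result + lora_B(lora_A(dropout(x))) * scaling
                result = result + lora_B(lora_A(dropout(x))) * scaling
        return result
```

For the norm1_context.linear instance guarded here, base would be an nn.Linear(3072, 18432) with r = 16, which matches the [16, 3072] and [18432, 16] weight shapes in the TENSOR_MATCH entries above.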
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
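Note: the norm1_context guards describe an AdaLayerNormZero-style block: SiLU followed by a Linear producing a 6 * 3072 = 18432-wide modulation vector (hence the [18432, 3072] base_layer weight earlier in this tree), and a LayerNorm(3072, eps=1e-6) whose weight and bias are guarded with ID_MATCH against the same object id (7580768) that appears wherever a parameter is absent, presumably the id of None in this process, consistent with elementwise_affine=False. The sketch below assumes the usual diffusers chunk ordering and is illustrative, not the exact diffusers class.

```python
# Sketch of the modulation path quoted in the guard comments
# (diffusers/src/diffusers/models/normalization.py:137-139). Shapes follow the guards:
# embedding_dim = 3072, linear out_features = 6 * 3072 = 18432, LayerNorm eps = 1e-6.
import torch
import torch.nn as nn

class AdaLayerNormZeroSketch(nn.Module):  # illustrative, not the diffusers implementation
    def __init__(self, embedding_dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim)
        self.norm = nn.LayerNorm(embedding_dim, eps=1e-6, elementwise_affine=False)

    def forward(self, x: torch.Tensor, emb: torch.Tensor):
        emb = self.linear(self.silu(emb))    # emb = self.linear(self.silu(emb))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
```

The five return values line up with the unpacking quoted a few lines below (norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp at transformer_flux.py:167).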
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if 
self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks, 
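Note: norm_q and norm_k above are per-head RMSNorm layers; the guard comments quote diffusers/src/diffusers/models/normalization.py:428-430 and pin eps == 1e-6 with a [128]-element weight, i.e. one scale per head_dim channel (3072 // 24 heads = 128). A minimal sketch of that normalization follows, written as an illustration rather than the exact diffusers code.

```python
# Sketch of the RMSNorm applied to query/key, following the lines quoted in the guards.
import torch
import torch.nn as nn

class RMSNormSketch(nn.Module):  # illustrative stand-in for diffusers' RMSNorm
    def __init__(self, dim: int = 128, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        # variance is computed in float32 for stability
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        # hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
        if self.weight is not None:           # if self.weight is not None: (normalization.py:430)
            hidden_states = hidden_states * self.weight
        return hidden_states.to(input_dtype)
```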
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
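Note: the attn guards fix heads == 24 and a [3072, 3072] to_q base weight, so head_dim = 3072 // 24 = 128, exactly the size of the norm_q / norm_k weights guarded earlier. A small sketch of the query path described by the quoted attention_processor.py lines; the batch and sequence sizes are arbitrary example values, and to_q here is a plain nn.Linear rather than the LoRA-wrapped module in the trace.

```python
# Sketch of the query projection and per-head reshape implied by the guard comments
# (attention_processor.py:1716 and 1721). Example sizes only.
import torch
import torch.nn as nn

inner_dim, heads = 3072, 24
head_dim = inner_dim // heads              # head_dim = inner_dim // attn.heads  -> 128
batch, seq = 2, 512                        # arbitrary; the real sequence length is data dependent

to_q = nn.Linear(inner_dim, inner_dim)     # the guarded base_layer weight is [3072, 3072] with a [3072] bias
hidden_states = torch.randn(batch, seq, inner_dim)

query = to_q(hidden_states)                                      # query = attn.to_q(hidden_states)
query = query.view(batch, -1, heads, head_dim).transpose(1, 2)   # -> [batch, heads, seq, head_dim]
```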
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'], 244529984) # 
encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = 
attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 
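Besides the weights, each LoRA-wrapped projection guarded in this block carries per-adapter bookkeeping: a single active adapter named 'default_0', a scaling of 1.0, no DoRA, nothing merged, adapters not disabled. The boolean values are not printed literally; they follow from the branch the trace took (the plain LoRA add at layer.py:509) and are pinned by ID_MATCH on the corresponding singletons. A plain-Python restatement of that assumed state, using the attribute names quoted in the guard sources, purely for illustration:

def adapter_state_assumed_by_the_graph(layer) -> bool:
    # 'layer' stands for one LoRA-wrapped Linear such as ...attn.add_q_proj.
    # Attribute names are the ones quoted in the guard sources
    # (peft/tuners/lora/layer.py, peft/tuners/tuners_utils.py). Illustrative only.
    return (
        layer._active_adapter == ["default_0"]   # LENGTH_CHECK + EQUALS_MATCH on _active_adapter[0]
        and layer.scaling["default_0"] == 1.0    # EQUALS_MATCH at layer.py:505
        and not layer.use_dora["default_0"]      # ID_MATCH; traced branch is layer.py:509
        and not layer.merged_adapters            # LENGTH_CHECK via tuners_utils.py:455
        and not layer._disable_adapters          # ID_MATCH; layer.py:488 branch not taken
    )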
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
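Each TENSOR_MATCH line records the properties Dynamo specializes on for a parameter the traced code touched: its Python type, dispatch keys, dtype, device index, requires_grad, size and stride, plus a NO_TENSOR_ALIASING check across the guarded tensors. The real check runs inside Dynamo's guard evaluator; the snippet below only restates the logged fields for to_out.0.base_layer.weight to make them concrete, and is not the actual check_tensor implementation.

import torch

def matches_logged_tensor(t: torch.Tensor) -> bool:
    # Plain restatement of the TENSOR_MATCH fields logged above. Illustrative only.
    return (
        isinstance(t, torch.nn.Parameter)
        and t.dtype == torch.bfloat16
        and t.device == torch.device("cuda", 0)   # device=0 in the log
        and t.requires_grad is False
        and tuple(t.size()) == (3072, 3072)
        and tuple(t.stride()) == (3072, 1)
    )

Changing any of these properties (casting the weight, moving it to another GPU, unfreezing it) fails the guard and forces a recompilation, which is why the tree repeats the same checks for every base and LoRA parameter in the block.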
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].eps, 
accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].processor, 139846069993136) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
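
Taken together, the norm2 guards above describe a LayerNorm with normalized_shape == (3072,), eps == 1e-06, and both parameter slots ID-matched to the same singleton (consistent with weight and bias being None, i.e. a non-affine LayerNorm, which also explains the two-entry _parameters dict with no TENSOR_MATCH). A minimal sketch of the module and the guarded call site, assuming standard torch.nn semantics rather than the diffusers source:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    # Assumed reconstruction from the guards: normalized_shape (3072,),
    # eps 1e-06, weight/bias both None -> elementwise_affine=False.
    norm2 = nn.LayerNorm(3072, eps=1e-6, elementwise_affine=False)

    hidden_states = torch.randn(1, 512, 3072)
    # The guarded call site (nn/modules/normalization.py:217) reduces to:
    out = F.layer_norm(hidden_states, norm2.normalized_shape, norm2.weight, norm2.bias, norm2.eps)
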
| | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
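
The guards from ff._modules['net']._modules['0']._modules['proj'] down to this point pin a PEFT LoRA-wrapped Linear: a frozen base_layer with weight [12288, 3072] and bias [12288] in torch.bfloat16 on cuda:0, trainable lora_A['default_0'] ([16, 3072]) and lora_B['default_0'] ([12288, 16]) whose bias slots are ID-matched to a singleton (consistent with None), scaling['default_0'] == 1.0, use_dora['default_0'] False, merged_adapters empty, adapters enabled, and _active_adapter == ['default_0']. A minimal sketch of the forward path those guards protect, following the peft/tuners/lora/layer.py lines quoted in the log (a simplified, assumed reconstruction, not the peft implementation; dtypes are left at the default for portability):

    import torch
    import torch.nn as nn

    class LoRALinearSketch(nn.Module):
        # Shapes and flags taken from the guards; the real parameters are
        # torch.bfloat16 on cuda:0 and the base layer is frozen.
        def __init__(self, in_features=3072, out_features=12288, r=16):
            super().__init__()
            self.base_layer = nn.Linear(in_features, out_features, bias=True)
            self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # assumed: no p/inplace guards appear
            self.scaling = {"default_0": 1.0}        # guarded: == 1.0
            self.use_dora = {"default_0": False}     # guarded: False
            self.merged_adapters = []                # guarded: empty
            self.disable_adapters = False            # guarded: False
            self.active_adapters = ["default_0"]     # guarded: ['default_0']

        def forward(self, x):
            # Mirrors the call sites quoted in the guards
            # (peft/tuners/lora/layer.py:497-509, simplified).
            result = self.base_layer(x)
            for active_adapter in self.active_adapters:
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]
                dropout = self.lora_dropout[active_adapter]
                scaling = self.scaling[active_adapter]
                x = x.to(lora_A.weight.dtype)
                if not self.use_dora[active_adapter]:
                    result = result + lora_B(lora_A(dropout(x))) * scaling
            return result

    layer = LoRALinearSketch()
    out = layer(torch.randn(2, 3072))   # -> shape (2, 12288)
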
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 
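
With the KeyValueManager entries so far, the guarded feed-forward stack reads as the usual GELU MLP: net['0'] is a GELU projection with a 3072 -> 12288 proj and approximate == 'tanh', net['1'] is a Dropout with p == 0.0, inplace False, training False, and net['2'] (guarded just below) is the LoRA-wrapped 12288 -> 3072 output projection. A minimal outline of that block, assuming the diffusers FeedForward/GELU layout implied by the quoted attention.py and activations.py call sites (the two projections are in fact LoRA-wrapped Linears, elided here):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class GELUProjSketch(nn.Module):
        # activations.py:88 (proj) and activations.py:83 (gelu), simplified.
        def __init__(self, dim_in=3072, dim_out=12288, approximate="tanh"):
            super().__init__()
            self.proj = nn.Linear(dim_in, dim_out)   # guarded base weight: [12288, 3072]
            self.approximate = approximate           # guarded: 'tanh'

        def forward(self, hidden_states):
            hidden_states = self.proj(hidden_states)
            return F.gelu(hidden_states, approximate=self.approximate)

    class FeedForwardSketch(nn.Module):
        def __init__(self, dim=3072, inner_dim=12288):
            super().__init__()
            self.net = nn.ModuleList([
                GELUProjSketch(dim, inner_dim),  # net['0']
                nn.Dropout(p=0.0),               # net['1']: p == 0.0, inplace False
                nn.Linear(inner_dim, dim),       # net['2']: guarded base weight [3072, 12288]
            ])

        def forward(self, hidden_states):
            # attention.py:1165-1166: iterate self.net and apply each module.
            for module in self.net:
                hidden_states = module(hidden_states)
            return hidden_states
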
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # 
return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # 
peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if 
isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=10 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[10] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[10] == '10' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].use_dora['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # 
diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].__dict__) # emb 
= self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 
7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__) # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for 
active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
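The TENSOR_MATCH entries just above pin the frozen to_k base_layer weight and bias to a fixed dtype (torch.bfloat16), device (cuda:0), requires_grad flag, size and stride. As a rough illustration only (tensor_guard_holds is a made-up helper, not Dynamo's actual check_tensor), the check amounts to:

    # Illustrative sketch of what a TENSOR_MATCH guard verifies; not Dynamo internals.
    import torch

    def tensor_guard_holds(t, *, dtype, device, requires_grad, size, stride):
        # Any mismatch in these properties invalidates the guard and forces a recompile.
        return (
            t.dtype == dtype
            and t.device == torch.device(device)
            and t.requires_grad == requires_grad
            and tuple(t.size()) == tuple(size)
            and tuple(t.stride()) == tuple(stride)
        )

    # CPU stand-in for the cuda:0 parameter from the dump, just to keep this runnable anywhere.
    weight = torch.empty(3072, 3072, dtype=torch.bfloat16)
    print(tensor_guard_holds(weight, dtype=torch.bfloat16, device="cpu",
                             requires_grad=False, size=(3072, 3072), stride=(3072, 1)))

Changing any of these properties after compilation, for example casting the weights or moving the module to another device, fails the guard and triggers recompilation of the frame.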
| | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
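Most of the guards in this block retrace the unmerged-LoRA dispatch that the quoted source comments point at (peft/tuners/lora/layer.py:488-509): disable_adapters, merged_adapters, the active_adapters list, and the per-adapter lora_A / lora_B / lora_dropout / scaling / use_dora lookups, ending in result = base_layer(x) + lora_B(lora_A(dropout(x))) * scaling. A simplified sketch of that path, using illustrative names rather than PEFT's actual class, looks like:

    # Simplified sketch of the guarded LoRA forward path; names are illustrative,
    # not PEFT's real implementation.
    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        def __init__(self, base: nn.Linear, r: int = 16, alpha: int = 16):
            super().__init__()
            self.base_layer = base
            self.lora_A = nn.Linear(base.in_features, r, bias=False)   # bias is None, as ID_MATCHed above
            self.lora_B = nn.Linear(r, base.out_features, bias=False)
            self.lora_dropout = nn.Identity()    # stands in for Dropout(p)
            self.scaling = alpha / r             # EQUALS_MATCH pins this to 1.0 in the dump
            self.disable_adapters = False        # ID_MATCH on a bool singleton
            self.use_dora = False                # plain-LoRA branch is the one guarded here

        def forward(self, x):
            result = self.base_layer(x)          # "result = self.base_layer(x, *args, **kwargs)"
            if not self.disable_adapters and not self.use_dora:
                x = x.to(self.lora_A.weight.dtype)   # "x = x.to(lora_A.weight.dtype)"
                result = result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling
            return result

    layer = LoraLinearSketch(nn.Linear(3072, 3072), r=16)
    print(layer(torch.randn(2, 3072)).shape)

The EQUALS_MATCH on scaling['default_0'] == 1.0 is consistent with lora_alpha matching the rank of 16 visible in the lora_A/lora_B weight shapes, since PEFT's default scaling is lora_alpha / r.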
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) 
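[editor's note] For orientation while reading these [__guards] entries: the per-projection guards above and below (TYPE_MATCH / DICT_LENGTH / LENGTH_CHECK / EQUALS_MATCH / ID_MATCH / TENSOR_MATCH on lora_A, lora_B, lora_dropout, scaling, use_dora, _active_adapter, merged_adapters and _disable_adapters) all specialize the adapter-dispatch branch of the PEFT LoRA linear forward that the guard comments cite (peft/tuners/lora/layer.py:488-509). The following is a minimal, self-contained sketch assembled from those quoted source lines; the class name, constructor, and the nn.Identity stand-in for the guarded dropout module are simplifying assumptions for illustration, not the actual peft implementation.

```python
# Minimal sketch (assumed structure) of the LoRA dispatch path these guards specialize on.
# Source lines marked with "# quoted:" appear verbatim in the guard comments above.
import torch
import torch.nn as nn


class SketchLoraLinear(nn.Module):
    """Hypothetical stand-in for a PEFT LoRA-wrapped Linear with one adapter 'default_0'."""

    def __init__(self, base_layer: nn.Linear, r: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base_layer
        # Single adapter named 'default_0', matching the EQUALS_MATCH guards above.
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # stand-in for the guarded dropout module
        self.scaling = {"default_0": scaling}     # guarded: DICT_LENGTH == 1, EQUALS_MATCH == 1.0
        self.use_dora = {"default_0": False}      # guarded: ID_MATCH on False
        self.merged_adapters = []                 # guarded: LENGTH_CHECK (empty list)
        self._disable_adapters = False            # guarded: ID_MATCH on False
        self._active_adapter = ["default_0"]      # guarded: LENGTH_CHECK == 1, EQUALS_MATCH == 'default_0'

    @property
    def active_adapters(self):
        return list(self._active_adapter)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self._disable_adapters:                          # quoted: if self.disable_adapters:
            return self.base_layer(x)
        result = self.base_layer(x)                         # quoted: result = self.base_layer(x, *args, **kwargs)
        for active_adapter in self.active_adapters:         # quoted: for active_adapter in self.active_adapters:
            if active_adapter not in self.lora_A.keys():    # quoted: if active_adapter not in self.lora_A.keys():
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]            # quoted: lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]     # quoted: dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]          # quoted: scaling = self.scaling[active_adapter]
            x_cast = x.to(lora_A.weight.dtype)              # quoted: x = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:           # quoted: if not self.use_dora[active_adapter]:
                # quoted: result = result + lora_B(lora_A(dropout(x))) * scaling
                result = result + lora_B(lora_A(dropout(x_cast))) * scaling
        return result


if __name__ == "__main__":
    # Shapes mirror the TENSOR_MATCH guards above: base weight [3072, 3072], lora_A [16, 3072], lora_B [3072, 16].
    layer = SketchLoraLinear(nn.Linear(3072, 3072), r=16)
    print(layer(torch.randn(2, 3072)).shape)
```

Because every one of these dictionary lengths, adapter names, flags, and tensor properties becomes a separate guard, any change to the adapter configuration (renaming, adding, merging, or disabling an adapter) invalidates the compiled graph and triggers recompilation. [end editor's note]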
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward 
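The guard block above for `add_q_proj` walks the PEFT LoRA `Linear.forward` path quoted inline in the guards (peft/tuners/lora/layer.py:488-509): `base_layer`, `lora_A['default_0']` / `lora_B['default_0']`, `lora_dropout`, `scaling`, `use_dora`, and the adapter bookkeeping (`_active_adapter`, `merged_adapters`, `_disable_adapters`). The following is a minimal sketch of that path reconstructed only from those quoted source lines; the class name, constructor arguments, and the `nn.Identity` stand-in for the guarded dropout module are illustrative assumptions, not PEFT's actual implementation.

```python
# Minimal sketch (assumed/illustrative, not PEFT's actual class) of the LoRA Linear
# forward path that the add_q_proj guards specialize on. Attribute names mirror the
# sources quoted in the guard comments (peft/tuners/lora/layer.py:488-509).
import torch
import torch.nn as nn


class LoraLinearSketch(nn.Module):
    def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base_layer                      # guarded via _modules['base_layer']
        self.lora_A = nn.ModuleDict(                      # guarded: keys()[0] == 'default_0'
            {adapter: nn.Linear(base_layer.in_features, r, bias=False)}
        )
        self.lora_B = nn.ModuleDict(
            {adapter: nn.Linear(r, base_layer.out_features, bias=False)}
        )
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # stand-in for guarded dropout
        self.scaling = {adapter: 1.0}                     # guarded: EQUALS_MATCH == 1.0
        self.use_dora = {adapter: False}                  # guarded: ID_MATCH on False
        self._active_adapter = [adapter]                  # guarded: list, len == 1, [0] == 'default_0'
        self.merged_adapters = []                         # guarded: LENGTH_CHECK (empty)
        self._disable_adapters = False                    # guarded: ID_MATCH on False

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self._disable_adapters or self.merged_adapters:          # layer.py:488 / merged
            return self.base_layer(x)
        result = self.base_layer(x)                                  # layer.py:497
        for active_adapter in self._active_adapter:                  # layer.py:499
            if active_adapter not in self.lora_A.keys():             # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                     # layer.py:503
            dropout = self.lora_dropout[active_adapter]              # layer.py:504
            scaling = self.scaling[active_adapter]                   # layer.py:505
            x = x.to(lora_A.weight.dtype)                            # layer.py:506
            if not self.use_dora[active_adapter]:                    # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result
```

Every attribute read on this path shows up above as a guard (TYPE_MATCH / ID_MATCH / EQUALS_MATCH / LENGTH_CHECK on the adapter bookkeeping, TENSOR_MATCH on the bf16 weights), so changing any of them after compilation, for example merging the adapter, switching the active adapter name, or enabling DoRA, would be expected to fail these guards and trigger a recompile of this frame.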
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = 
attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
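norm_added_q and norm_added_k are guarded down to eps == 1e-06 and a single bfloat16 weight of shape [128] (the per-head dimension), and the comments point at the rsqrt line in diffusers/src/diffusers/models/normalization.py:428-430. A rough sketch of an RMSNorm forward consistent with those guards, reconstructed from the quoted lines rather than taken from the actual diffusers source:

import torch
import torch.nn as nn

class RMSNormSketch(nn.Module):
    # Illustrative only; dim=128 matches the TENSOR_MATCH on the guarded weight.
    def __init__(self, dim: int = 128, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        # Variance in float32 for stability (assumption), then the guarded line:
        # hidden_states * torch.rsqrt(variance + self.eps)
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states.to(torch.float32) * torch.rsqrt(variance + self.eps)
        if self.weight is not None:  # corresponds to the _parameters length check above
            hidden_states = hidden_states * self.weight.to(torch.float32)
        return hidden_states.to(input_dtype)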
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = 
attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].processor, 139846069233408) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'], 244529984) # for module 
in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # 
peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: 
# peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._backward_hooks # encoder_hidden_states, 
hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=11 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[11] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[11] == '11' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
+- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].inplace, 
accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, 
c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) 
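The guard block above for norm1_context.linear (and the matching block for norm1.linear earlier) walks the attributes of a PEFT LoRA-wrapped nn.Linear: base_layer, lora_dropout, lora_A / lora_B under the adapter key 'default_0', scaling, use_dora, _active_adapter, merged_adapters and _disable_adapters, each guard annotated with the peft/tuners/lora/layer.py source line it protects. The sketch below is not the PEFT implementation; it is a minimal reconstruction of that control flow from the source lines quoted in the guard comments, so each TYPE_MATCH / DICT_LENGTH / EQUALS_MATCH / TENSOR_MATCH above can be read against a concrete attribute. The class name LoraLinearSketch, its constructor arguments, and the use of nn.Identity for the dropout slot are invented for illustration; the ID_MATCH targets 7580768 and 7629920 appear to be the ids of the None and False singletons in this process (bias=None, use_dora=False, _disable_adapters=False), which is an inference from context rather than something the log states.

# Minimal sketch, assuming the control flow quoted in the guard comments
# (peft/tuners/lora/layer.py:488-509); not the actual PEFT code.
import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    def __init__(self, base: nn.Linear, r: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base                          # guarded via _modules['base_layer'], weights TENSOR_MATCH'd
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # stand-in for the configured dropout module
        self.scaling = {"default_0": scaling}           # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}            # ID_MATCH on the False singleton
        self._active_adapter = ["default_0"]            # LENGTH_CHECK == 1, EQUALS_MATCH on the adapter name
        self.merged_adapters = []                       # LENGTH_CHECK: empty list
        self._disable_adapters = False                  # ID_MATCH on the False singleton

    def forward(self, x):
        result = self.base_layer(x)                     # result = self.base_layer(x, *args, **kwargs)
        for active_adapter in self._active_adapter:     # for active_adapter in self.active_adapters:
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x = x.to(lora_A.weight.dtype)               # dtype guard on lora_A.weight
            if not self.use_dora[active_adapter]:
                result = result + lora_B(lora_A(dropout(x))) * scaling
        return result

# Shapes taken from the TENSOR_MATCH guards above: frozen base 3072 -> 18432 in bfloat16,
# trainable lora_A 3072 -> 16 and lora_B 16 -> 18432.
base = nn.Linear(3072, 18432).to(torch.bfloat16)
base.requires_grad_(False)
layer = LoraLinearSketch(base, r=16).to(torch.bfloat16)
out = layer(torch.randn(2, 3072, dtype=torch.bfloat16))

Because every one of these attribute reads becomes a guard on the [0/1] cache entry, changing any of them after compilation (switching or renaming the active adapter, merging LoRA weights so merged_adapters is non-empty, toggling disable_adapters, or altering a guarded weight's dtype, shape or requires_grad) would fail the guard check and force Dynamo to recompile or fall back to another cache entry; that is general guard behavior, applied here to the adapter state visible in this trace.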
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is 
not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], 244529984) # value = 
attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- 
DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # 
peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], 
accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if 
not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: 
not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = 
self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, 
self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) 
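The guards in this stretch of the dump cover the PEFT LoRA wrappers inside transformer block 11's attention (add_q_proj, to_out.0, to_add_out): TYPE_MATCH on the LoRA containers, TENSOR_MATCH on the frozen bfloat16 base weights ([3072, 3072], requires_grad=False) and on the trainable rank-16 lora_A / lora_B weights ([16, 3072] and [3072, 16]), EQUALS_MATCH on the adapter bookkeeping (active adapter 'default_0', scaling 1.0, Dropout p == 0.0), and ID_MATCH checks that pin the boolean flags (use_dora, _disable_adapters, the Dropout's training/inplace) to the values seen at compile time. As a minimal, self-contained sketch (not the author's pipeline, which compiles the full FluxTransformer2DModel with PEFT adapters attached), the snippet below reproduces the guarded forward path quoted in the guard comments, result = base(x) + lora_B(lora_A(dropout(x))) * scaling from peft/tuners/lora/layer.py:509, using the shapes and dtypes reported by the TENSOR_MATCH entries. The class name LoraLinearSketch is made up for illustration; torch._logging.set_logs(guards=True) (equivalent to TORCH_LOGS="guards") is the standard way to obtain a TREE_GUARD_MANAGER dump like the one in this log.

import torch
import torch.nn as nn

# Same logging artifact that produced this dump; equivalent to TORCH_LOGS="guards".
torch._logging.set_logs(guards=True)

class LoraLinearSketch(nn.Module):
    """Hypothetical stand-in for one PEFT lora.Linear wrapper (shapes taken from the TENSOR_MATCH guards)."""
    def __init__(self, in_features=3072, out_features=3072, rank=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)   # frozen base weight [3072, 3072] + bias [3072]
        self.lora_A = nn.Linear(in_features, rank, bias=False)   # weight [16, 3072]; bias=False -> _parameters['bias'] is None
        self.lora_B = nn.Linear(rank, out_features, bias=False)  # weight [3072, 16]; bias=False -> _parameters['bias'] is None
        self.dropout = nn.Dropout(p=0.0)                         # guarded: p == 0.0
        self.scaling = scaling                                   # guarded: scaling == 1.0
        self.base_layer.weight.requires_grad_(False)             # base params appear in the guards with requires_grad=False
        self.base_layer.bias.requires_grad_(False)

    def forward(self, x):
        result = self.base_layer(x)
        # The line the guard comments keep pointing at (peft/tuners/lora/layer.py:509):
        #   result = result + lora_B(lora_A(dropout(x))) * scaling
        return result + self.lora_B(self.lora_A(self.dropout(x))) * self.scaling

device = "cuda" if torch.cuda.is_available() else "cpu"
model = LoraLinearSketch().to(device=device, dtype=torch.bfloat16).eval()  # eval() so Dropout.training is False, as guarded
x = torch.randn(1, 512, 3072, device=device, dtype=torch.bfloat16)
compiled = torch.compile(model)
with torch.no_grad():
    compiled(x)  # prints a (much smaller) TREE_GUARD_MANAGER with the same guard kinds

Each of these guarded values is a recompile trigger: switching the active adapter, calling train(), changing the LoRA scale, or merging the adapter weights flips one of the guarded attributes, the guard check fails on the next call, and Dynamo recompiles the frame (the [0/1] compile id in these log lines already corresponds to a recompilation of frame 0).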
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].processor, 139846069235472) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
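Taken together, the guards on to_add_out above and on ff.net.0.proj here re-verify every attribute the PEFT LoRA linear forward reads: the six-entry _modules dict (base_layer, lora_A, lora_B, lora_dropout, lora_embedding_A, lora_embedding_B), the 'default_0' adapter key, scaling['default_0'] == 1.0, use_dora['default_0'] being False, the absent lora_A/lora_B biases (ID_MATCH against the None singleton), and the weight tensors themselves. The following is a minimal sketch of that control flow, reconstructed only from the source-line comments embedded in the guards (peft/tuners/lora/layer.py:488-509), not copied from the peft sources; the class name, the Identity dropout, the dimensions and anything else not quoted in the log are illustrative.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Illustrative stand-in for the LoRA-wrapped Linear whose forward the
    # guards reference; attribute names follow the guard sources, the rest is
    # made up for the sketch.
    def __init__(self, in_features=3072, out_features=3072, r=16, adapter="default_0"):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)        # TENSOR_MATCH on weight/bias
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False)})  # bias=None -> ID_MATCH
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}                                  # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}                               # ID_MATCH on the False singleton
        self.active_adapters = [adapter]                               # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self.disable_adapters = False
        self.merged_adapters = []                                      # LENGTH_CHECK: empty, i.e. not merged

    def forward(self, x, *args, **kwargs):
        # layer.py:488: if self.disable_adapters: ... (merge/unmerge path elided)
        result = self.base_layer(x, *args, **kwargs)                   # layer.py:497
        for active_adapter in self.active_adapters:                    # layer.py:499
            if active_adapter not in self.lora_A.keys():               # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                       # layer.py:503
            dropout = self.lora_dropout[active_adapter]                # layer.py:504
            scaling = self.scaling[active_adapter]                     # layer.py:505
            x = x.to(lora_A.weight.dtype)                              # layer.py:506
            if not self.use_dora[active_adapter]:                      # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling # layer.py:509
        return result

Because every attribute touched on that path (each module-dict entry, the scaling and use_dora dicts, the adapter name, the weights) becomes its own guard node, a single LoRA-wrapped Linear contributes dozens of entries to this tree, which is why the dump for the full transformer runs to thousands of lines; merging the adapters into the base weights before compiling is one common way to shrink the guard set, though the log itself does not show that step.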
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # 
nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net'], 
accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, 
size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 
3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['11']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=12 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[12] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[12] == '12' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_hooks 
# value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].processor, 139846069036896) # return self.processor( 
# diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].__dict__) # norm_hidden_states = 
self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape[0], 
accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 
'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._active_adapter[0], 
accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = 
module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=13 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[13] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules.keys())[13] == '13' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], 
stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], 
accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], 244529984) # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- 
DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # 
peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], 
accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = 
attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = 
attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].processor, 139846069038960) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'], 244529984) # for module 
in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # 
peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: 
# peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._backward_hooks # encoder_hidden_states, 
hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=14 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[14] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[14] == '14' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
+- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].inplace, 
accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, 
c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
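
The norm1 guards above describe the AdaLayerNormZero path quoted in the comments (diffusers normalization.py:135-139): SiLU into a LoRA-wrapped Linear whose base weight is guarded at [18432, 3072], i.e. six modulation vectors of width 3072, followed by a LayerNorm with eps == 1e-06 and normalized_shape (3072,) whose weight and bias are id-matched to the same constant, consistent with elementwise_affine=False; the same structure is then guarded again for norm1_context. A minimal sketch of that shape arithmetic, assuming the usual chunk into six (shift/scale/gate for attention and MLP), which matches the five extra values unpacked at transformer_flux.py:165:

    import torch

    dim = 3072                                    # guarded normalized_shape[0]
    silu = torch.nn.SiLU()
    linear = torch.nn.Linear(dim, 6 * dim)        # guarded base_layer weight [18432, 3072]
    norm = torch.nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)

    def ada_layer_norm_zero(x, emb):
        emb = linear(silu(emb))                   # normalization.py:137
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]   # normalization.py:139
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp

    x, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada_layer_norm_zero(
        torch.randn(1, 4, dim), torch.randn(1, dim)
    )
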
| | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) 
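The guard cluster above (and the near-identical clusters that follow for attn.to_q and the other LoRA-wrapped projections) all trace back to the PEFT LoRA Linear forward whose source lines are quoted in the guard comments (peft/tuners/lora/layer.py:488-509). Every attribute read on that path -- scaling['default_0'], use_dora['default_0'], _active_adapter, merged_adapters, _disable_adapters, the lora_A / lora_B / lora_dropout ModuleDicts and their parameters -- gets its own TYPE_MATCH / DICT_LENGTH / EQUALS_MATCH / ID_MATCH / TENSOR_MATCH guard. The sketch below is a minimal, illustrative reconstruction of that path, paraphrased from the quoted source lines only; it is not the actual peft implementation, and the nn.Identity dropout and the constructor defaults are assumptions.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Minimal stand-in for the guarded peft LoRA Linear (illustration only)."""

    def __init__(self, base: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base                                        # guarded: TYPE_MATCH + TENSOR_MATCH on weight/bias
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})   # assumption: p=0 dropout behaves like Identity
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base.out_features, bias=False)})
        self.scaling = {adapter: 1.0}        # guarded: EQUALS_MATCH == 1.0
        self.use_dora = {adapter: False}     # guarded: ID_MATCH (the DoRA branch is not taken)
        self._active_adapter = [adapter]     # guarded: LENGTH_CHECK == 1, EQUALS_MATCH == 'default_0'
        self.merged_adapters = []            # guarded: empty, i.e. the adapter is unmerged
        self._disable_adapters = False       # guarded: ID_MATCH (adapters are enabled)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = self.base_layer(x)                       # result = self.base_layer(x, *args, **kwargs)
        for active_adapter in self._active_adapter:       # for active_adapter in self.active_adapters:
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x = x.to(lora_A.weight.dtype)                 # x = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:
                result = result + lora_B(lora_A(dropout(x))) * scaling
        return result

# Hypothetical usage with sizes matching the 3072 -> 18432 projection guarded above:
#   layer = LoraLinearSketch(nn.Linear(3072, 18432), r=16)
#   y = layer(torch.randn(2, 3072))

Because Dynamo specializes on every one of these attribute reads, each LoRA-wrapped Linear in the transformer contributes the same block of guards, which is why this pattern repeats below for attn.to_q and the remaining projections.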
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is 
not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'], 244529984) # value = 
attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- 
DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # 
peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], 
accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if 
not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: 
not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = 
self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, 
self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].processor, 139846068848576) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # 
nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net'], 
accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, 
size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 
3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['14']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=15 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[15] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[15] == '15' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_hooks 
# value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].processor, 139846068850640) # return self.processor( 
# diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].__dict__) # norm_hidden_states = 
self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape[0], 
accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 
'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._active_adapter[0], 
accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = 
module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=16 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[16] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules.keys())[16] == '16' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], 
stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], 
accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], 244529984) # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- 
DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # 
peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], 
accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward 
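The guard comments in this block keep pointing at peft/tuners/lora/layer.py:488-509 and nn/modules/linear.py:125. For orientation, below is a minimal sketch (not PEFT's actual implementation; class and attribute names are illustrative) of the LoRA Linear forward path those guards are protecting. Shapes, dtypes and flags follow the TENSOR_MATCH / EQUALS_MATCH / ID_MATCH values recorded above: a frozen 3072x3072 bfloat16 base projection, trainable rank-16 lora_A/lora_B with no bias, scaling == 1.0, use_dora == False, and a single adapter keyed 'default_0'.

# Minimal sketch of the PEFT-style LoRA Linear forward traced in this guard dump.
# All concrete values (3072, r=16, bfloat16, 'default_0') are taken from the
# TENSOR_MATCH / EQUALS_MATCH guards above; this is not PEFT's real code.
import torch
import torch.nn as nn


class LoraLinearSketch(nn.Module):
    def __init__(self, in_features=3072, out_features=3072, r=16, adapter="default_0"):
        super().__init__()
        # Frozen base projection -> TENSOR_MATCH(..., requires_grad=False) above.
        self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
        self.base_layer.requires_grad_(False)
        # Per-adapter containers -> TYPE_MATCH / DICT_LENGTH / EQUALS_MATCH on the
        # 'default_0' key above. Identity stands in for a zero-probability dropout.
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)})
        self.scaling = {adapter: 1.0}          # EQUALS_MATCH ... scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}       # ID_MATCH against False
        self.merged_adapters = []              # LENGTH_CHECK: not merged_adapters
        self._disable_adapters = False         # ID_MATCH against False
        self._active_adapter = [adapter]       # EQUALS_MATCH ... == 'default_0'

    def forward(self, x):
        # Simplified version of the control flow the guard comments cite
        # (peft/tuners/lora/layer.py:488-509).
        if self._disable_adapters or self.merged_adapters:
            return self.base_layer(x)
        result = self.base_layer(x)                      # layer.py:497
        for active_adapter in self._active_adapter:      # layer.py:499
            lora_A = self.lora_A[active_adapter]         # layer.py:500/502
            lora_B = self.lora_B[active_adapter]         # layer.py:503
            dropout = self.lora_dropout[active_adapter]  # layer.py:504
            scaling = self.scaling[active_adapter]       # layer.py:505
            x = x.to(lora_A.weight.dtype)                # layer.py:506
            if not self.use_dora[active_adapter]:        # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Every Python attribute read on that path (the 'default_0' key, the scaling float, the use_dora flag, each parameter's shape, dtype and requires_grad) shows up above as its own guard, so swapping or merging adapters, toggling disable_adapters, or loading a LoRA of a different rank will fail these guards and trigger a recompile. Guard dumps like this one are produced by running with TORCH_LOGS="guards" (or, in recent PyTorch releases, torch._logging.set_logs(guards=True)).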
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = 
attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = 
attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].processor, 139846068639824) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'], 244529984) # for module 
in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) 
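[editor's note] The guard entries above and below keep pointing at the same handful of source lines in peft's LoRA `Linear.forward` (peft/tuners/lora/layer.py:488, 497, 499-509, quoted in the trailing `#` comments). Purely for orientation, here is a minimal sketch of that hot path reassembled from those quoted lines; `LoraLinearSketch`, its constructor arguments, and the default values are illustrative stand-ins inferred from the guards (r=16, dropout p=0.0, scaling 1.0, single adapter 'default_0', bias-free lora_A/lora_B), not peft's actual class or API.

```python
# Sketch of the LoRA forward path referenced by the guard comments above.
# Assumption: shapes/values mirror the TENSOR_MATCH / EQUALS_MATCH guards
# (bf16 frozen base layer, rank-16 adapters, adapter name 'default_0').
import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base_layer                    # frozen base Linear (requires_grad=False in the guards)
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Dropout(p=0.0)})  # p == 0.0 per EQUALS_MATCH guard
        self.scaling = {adapter: 1.0}                   # scaling['default_0'] == 1.0 per EQUALS_MATCH guard
        self.use_dora = {adapter: False}                # use_dora['default_0'] is False per ID_MATCH guard
        self._active_adapter = [adapter]                # single active adapter 'default_0'

    def forward(self, x, *args, **kwargs):
        # (layer.py:488 checks self.disable_adapters; adapters are enabled here)
        result = self.base_layer(x, *args, **kwargs)            # layer.py:497
        for active_adapter in self._active_adapter:             # layer.py:499
            if active_adapter not in self.lora_A.keys():        # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                # layer.py:503
            dropout = self.lora_dropout[active_adapter]         # layer.py:504
            scaling = self.scaling[active_adapter]              # layer.py:505
            x = x.to(lora_A.weight.dtype)                       # layer.py:506
            if not self.use_dora[active_adapter]:               # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

# Example (hypothetical shapes matching the ff.net[2] guards: 12288 -> 3072):
# layer = LoraLinearSketch(nn.Linear(12288, 3072))
# out = layer(torch.randn(2, 12288))
```

Every dict key, dtype, shape, and scalar shown in the guards (adapter names, `use_dora`, `scaling`, `p`, tensor sizes/strides) is checked on each cached-graph entry, so changing any of them after compilation would, as far as I understand Dynamo's guard mechanism, invalidate this cache entry and trigger recompilation.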
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, 
size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # 
peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: 
# peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._backward_hooks # encoder_hidden_states, 
hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=17 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[17] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[17] == '17' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
+- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].inplace, 
accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, 
c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is 
not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], 
stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], 244529984) # value = 
attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- 
DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # 
peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], 
accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
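
The add_k_proj guards above, and the add_v_proj / add_q_proj guards that follow, all trace the same PEFT LoRA Linear forward; every attribute Dynamo touched while tracing it (base_layer, lora_A, lora_B, lora_dropout, scaling, use_dora, the active-adapter list, the merged/disable flags) gets its own guard. The sketch below is reconstructed from the peft/tuners/lora/layer.py lines quoted in the guard comments (488, 497-509); it is a simplified illustration (no merged-adapter or DoRA branches), not the verbatim PEFT implementation, and the function name is ours.

def lora_linear_forward(layer, x):
    # layer is assumed to be a PEFT LoRA Linear wrapper like add_k_proj above
    if layer.disable_adapters:                     # guarded via ID_MATCH on _disable_adapters
        return layer.base_layer(x)
    result = layer.base_layer(x)                   # frozen nn.Linear: TENSOR_MATCH on weight/bias
    for active_adapter in layer.active_adapters:   # EQUALS_MATCH pins this to ['default_0']
        if active_adapter not in layer.lora_A.keys():
            continue
        lora_A = layer.lora_A[active_adapter]      # Linear(3072 -> 16) in these guards
        lora_B = layer.lora_B[active_adapter]      # Linear(16 -> 3072)
        dropout = layer.lora_dropout[active_adapter]
        scaling = layer.scaling[active_adapter]    # EQUALS_MATCH: 1.0
        x = x.to(lora_A.weight.dtype)              # bfloat16 here
        if not layer.use_dora[active_adapter]:     # ID_MATCH on the stored flag
            result = result + lora_B(lora_A(dropout(x))) * scaling
    return result
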
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if 
not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
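
The TENSOR_MATCH guards in this block (the base_layer weight/bias of add_v_proj above and add_q_proj below, plus the lora_A / lora_B weights) pin down the class, dtype, device, requires_grad flag, shape and stride of each parameter. The real check is performed by Dynamo's internal guard machinery (check_tensor); the snippet below is only a rough Python approximation of what one such guard verifies, using the values reported for the frozen 3072x3072 base weight:

import torch

def roughly_matches_base_weight(p: torch.Tensor) -> bool:
    # approximate re-statement of one TENSOR_MATCH guard above; not the actual implementation
    return (
        isinstance(p, torch.nn.Parameter)
        and p.dtype == torch.bfloat16
        and p.device == torch.device("cuda", 0)
        and p.requires_grad is False          # base weights are frozen; only lora_A/lora_B require grad
        and tuple(p.shape) == (3072, 3072)
        and p.stride() == (3072, 1)
    )
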
[__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 
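
The recurring ID_MATCH guards (___check_obj_id against 7580768 and 7629920) compare id(obj) with an integer captured at trace time. Given the guarded expressions, they most plausibly pin the None biases of the lora_A / lora_B sub-layers and the False values of use_dora and _disable_adapters; that reading is an inference, and the raw ids are process-specific. A rough equivalent:

def check_obj_id(obj, expected_id: int) -> bool:
    # passes only while obj is the exact same Python object Dynamo saw when tracing
    return id(obj) == expected_id

# e.g. a bias that stays None keeps satisfying its guard, while assigning a real bias
# tensor later would change the id and invalidate the compiled graph.
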
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: 
not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = 
self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, 
self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) 
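
The add_q_proj, to_out[0], and to_add_out subtrees above and below all install the same pattern of guards on a PEFT LoRA-wrapped Linear: TYPE_MATCH/DICT_LENGTH on the lora_A / lora_B / lora_dropout ModuleDicts keyed by 'default_0', EQUALS_MATCH on scaling['default_0'] == 1.0, ID_MATCH on the use_dora and disable_adapters flags, EQUALS_MATCH on the active adapter name, and TENSOR_MATCH on the frozen bfloat16 base weight ([3072, 3072] with a [3072] bias) plus the rank-16 adapter weights ([16, 3072] and [3072, 16]). Below is a minimal sketch of the forward path those guards protect, reconstructed only from the source lines quoted in the guard comments (peft/tuners/lora/layer.py:497-509, nn/modules/linear.py:125); the class name, constructor defaults, and the use of nn.Identity for lora_dropout are illustrative assumptions, not the actual peft implementation.

# Minimal sketch (not the real peft code) of the LoRA linear forward that the guards
# above are checking. Attribute names, shapes, and dtypes are taken from the guard
# sources quoted in this log; everything else is a simplification for illustration.
import torch
import torch.nn as nn


class SketchLoraLinear(nn.Module):
    def __init__(self, in_features=3072, out_features=3072, r=16, adapter="default_0"):
        super().__init__()
        # Frozen base projection: guarded via TENSOR_MATCH on weight [3072, 3072] / bias [3072].
        self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
        self.base_layer.requires_grad_(False)
        # Per-adapter submodules: guarded via DICT_LENGTH == 1 and the 'default_0' key.
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.lora_A = nn.ModuleDict(
            {adapter: nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)}
        )
        self.lora_B = nn.ModuleDict(
            {adapter: nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)}
        )
        # Plain attributes: guarded via EQUALS_MATCH / ID_MATCH on their concrete values.
        self.scaling = {adapter: 1.0}
        self.use_dora = {adapter: False}
        self.active_adapters = [adapter]

    def forward(self, x):
        # Mirrors the path cited at peft/tuners/lora/layer.py:497-509 in the guard comments
        # (the disable_adapters / merged_adapters branches at :488 and tuners_utils.py:455
        # are omitted for brevity).
        result = self.base_layer(x)
        for active_adapter in self.active_adapters:
            if active_adapter not in self.lora_A.keys():
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:
                result = result + lora_B(lora_A(dropout(x))) * scaling
        return result


if __name__ == "__main__":
    layer = SketchLoraLinear()
    out = layer(torch.randn(2, 3072, dtype=torch.bfloat16))
    print(out.shape)  # torch.Size([2, 3072])

Every attribute read on this path (adapter keys, scaling values, use_dora flags, parameter shapes and dtypes) becomes a guard in the tree above; if any of them changes between calls, the guard check fails and Dynamo recompiles the graph.
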
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
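The guard subtree above for transformer_blocks.17.attn.to_add_out mirrors the PEFT LoRA Linear forward path that the log's source comments quote (peft/tuners/lora/layer.py:488-509): every Python attribute that forward reads (disable_adapters, merged_adapters, active_adapters, lora_A / lora_B / lora_dropout, scaling, use_dora) gets its own DICT_LENGTH / TYPE_MATCH / ID_MATCH / TENSOR_MATCH guard. A minimal sketch of that control flow, reconstructed only from the quoted source lines; the class name LoraLinearSketch, the constructor scaffolding, and the default values are assumptions, not the real peft code:

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Assumed scaffolding; only the forward body mirrors the lines quoted in the guards.
    def __init__(self, base_layer: nn.Linear, r: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base_layer
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
        self.scaling = {"default_0": scaling}      # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}       # ID_MATCH on the False singleton
        self.active_adapters = ["default_0"]       # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self.merged_adapters = []                  # LENGTH_CHECK: not merged_adapters
        self.disable_adapters = False              # ID_MATCH: _disable_adapters is False

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.disable_adapters or self.merged_adapters:  # layer.py:488 / tuners_utils.py:455
            return self.base_layer(x)
        result = self.base_layer(x)                         # layer.py:497 -> F.linear weight/bias guards
        for active_adapter in self.active_adapters:         # layer.py:499
            if active_adapter not in self.lora_A.keys():    # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]            # layer.py:503
            dropout = self.lora_dropout[active_adapter]     # layer.py:504
            scaling = self.scaling[active_adapter]          # layer.py:505
            x = x.to(lora_A.weight.dtype)                   # layer.py:506
            if not self.use_dora[active_adapter]:           # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Under torch.compile, each of these attribute reads becomes one of the guards printed above, and the same pattern repeats for every LoRA-wrapped Linear in the transformer, which is why the guard tree is this large.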
[__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
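The norm_added_q / norm_added_k guards above pin eps == 1e-06 and a single bf16 weight of shape [128], and quote diffusers/src/diffusers/models/normalization.py:428 and :430. A minimal RMSNorm-style sketch of that forward; only the two quoted lines come from the log, while the variance computation and dtype handling here are assumptions:

import torch
import torch.nn as nn

class RMSNormSketch(nn.Module):
    def __init__(self, dim: int = 128, eps: float = 1e-6):
        super().__init__()
        self.eps = eps                               # EQUALS_MATCH: eps == 1e-06
        self.weight = nn.Parameter(torch.ones(dim))  # TENSOR_MATCH: bf16, size [128] in the real model

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        variance = hidden_states.float().pow(2).mean(-1, keepdim=True)       # assumed
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)     # normalization.py:428
        if self.weight is not None:                                          # normalization.py:430
            hidden_states = hidden_states.to(self.weight.dtype) * self.weight
        return hidden_states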
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].processor, 139846067916960) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
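The ff guards in this stretch describe a feed-forward block whose net is a 3-element ModuleList, with net[0] holding a LoRA-wrapped proj Linear whose base weight is [12288, 3072] in bf16. A rough sketch of the structure implied by the quoted lines (diffusers attention.py:1165-1166 and activations.py:88); the tanh-GELU activation, the dropout in the middle, and the layout of net[1]/net[2] are assumptions:

import torch
import torch.nn as nn
import torch.nn.functional as F

class GELUProjSketch(nn.Module):
    # net[0]: projection followed by an activation (activations.py:88 quotes self.proj(...)).
    def __init__(self, dim_in: int = 3072, dim_out: int = 12288):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)     # guarded base_layer weight: size [12288, 3072]

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.proj(hidden_states)   # activations.py:88
        return F.gelu(hidden_states, approximate="tanh")  # assumed activation

class FeedForwardSketch(nn.Module):
    def __init__(self, dim: int = 3072, inner_dim: int = 12288):
        super().__init__()
        # LENGTH_CHECK above: len(net) == 3; the middle Dropout is an assumption.
        self.net = nn.ModuleList([GELUProjSketch(dim, inner_dim), nn.Dropout(0.0), nn.Linear(inner_dim, dim)])

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        for module in self.net:                    # attention.py:1165
            hidden_states = module(hidden_states)  # attention.py:1166
        return hidden_states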
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 
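
The guard entries above and below repeatedly cite peft/tuners/lora/layer.py:488-509 and nn/modules/linear.py:125, i.e. the LoRA-wrapped Linear forward that Dynamo is specializing against. As a rough reference for what these attribute guards correspond to, here is a minimal, hypothetical sketch of that forward path, reconstructed only from the source comments and TENSOR_MATCH shapes recorded in this trace for transformer_blocks.17.ff.net.2 (base weight [3072, 12288], lora_A [16, 12288], lora_B [3072, 16], torch.bfloat16, scaling 1.0, dropout p=0.0, single adapter 'default_0'). The class name and constructor are illustrative stand-ins, not the actual peft implementation or API.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Hypothetical stand-in for peft's lora.Linear, reconstructed from the
    # source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509).
    # Not the real peft class; shapes/dtypes follow the TENSOR_MATCH guards for
    # transformer_blocks.17.ff.net.2.
    def __init__(self, in_features=12288, out_features=3072, r=16,
                 lora_alpha=16, adapter="default_0", dtype=torch.bfloat16):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, dtype=dtype)
        self.base_layer.requires_grad_(False)  # guards show requires_grad=False on the base weight/bias
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False, dtype=dtype)})  # weight [16, 12288]
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False, dtype=dtype)})  # weight [3072, 16]
        self.lora_dropout = nn.ModuleDict({adapter: nn.Dropout(p=0.0)})  # guard: p == 0.0
        self.scaling = {adapter: lora_alpha / r}  # guard: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}          # guard: ID_MATCH against False
        self.merged_adapters = []                 # guard: merged_adapters is an empty list
        self._disable_adapters = False            # guard: ID_MATCH against False
        self._active_adapter = [adapter]          # guard: == ['default_0']

    def forward(self, x):
        # Simplified control flow mirroring the quoted source lines; the real
        # implementation also handles merged adapters and the DoRA branch.
        if self._disable_adapters:
            return self.base_layer(x)
        result = self.base_layer(x)
        for active_adapter in self._active_adapter:
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x_cast = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:
                result = result + lora_B(lora_A(dropout(x_cast))) * scaling
        return result

# Example with the shapes guarded above for the FeedForward output projection.
layer = LoraLinearSketch()
out = layer(torch.randn(2, 12288, dtype=torch.bfloat16))
assert out.shape == (2, 3072)

Every attribute this sketch touches in forward() -- _disable_adapters, _active_adapter, merged_adapters, use_dora, scaling, the dropout's p, and the weight/bias tensors -- shows up as its own guard entry in the tree, which is why the dump runs this deep for a single FeedForward block.
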
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # 
nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net'], 
accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, 
size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 
3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor 
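
The guard records around this point all trace back, via their trailing '#' comments, to two places: the diffusers FeedForward used as ff_context (a tanh-GELU projection, a Dropout with p=0.0, and a closing Linear) and the PEFT LoRA forward path at peft/tuners/lora/layer.py:497-509 (result = self.base_layer(x, ...), then result + lora_B(lora_A(dropout(x))) * scaling when use_dora is off). As a reading aid, below is a minimal, hypothetical Python sketch of the structure Dynamo is pinning down for transformer_blocks.17.ff_context. The class names LoraLinearSketch and FFContextSketch are invented for illustration and this is not the actual PEFT or diffusers code; but the shapes (3072 <-> 12288), dtype (bfloat16), rank-16 adapters, adapter key 'default_0', scaling 1.0, dropout p=0.0 and use_dora=False are the values the TENSOR_MATCH / EQUALS_MATCH / ID_MATCH guards above assert.

import torch
import torch.nn as nn
import torch.nn.functional as F


class LoraLinearSketch(nn.Module):
    # Hypothetical stand-in for the guarded peft lora.Linear: a frozen base Linear
    # plus a single rank-16 adapter keyed 'default_0', added on the side.
    def __init__(self, in_features, out_features, rank=16, dtype=torch.bfloat16):
        super().__init__()
        # The TENSOR_MATCH guards pin base_layer.weight / .bias as frozen bf16 Parameters.
        self.base_layer = nn.Linear(in_features, out_features, dtype=dtype)
        self.base_layer.requires_grad_(False)
        # lora_A / lora_B / lora_dropout / scaling / use_dora are dicts keyed by adapter
        # name, which is why the guards check dict lengths and the 'default_0' entries.
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, rank, bias=False, dtype=dtype)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(rank, out_features, bias=False, dtype=dtype)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Dropout(p=0.0)})
        self.scaling = {"default_0": 1.0}
        self.use_dora = {"default_0": False}
        self._active_adapter = ["default_0"]

    def forward(self, x):
        # Mirrors the control flow quoted in the guard comments
        # (peft/tuners/lora/layer.py:497-509).
        result = self.base_layer(x)
        for name in self._active_adapter:
            if name not in self.lora_A:
                continue
            lora_A = self.lora_A[name]
            lora_B = self.lora_B[name]
            dropout = self.lora_dropout[name]
            scaling = self.scaling[name]
            x_adapter = x.to(lora_A.weight.dtype)
            if not self.use_dora[name]:
                result = result + lora_B(lora_A(dropout(x_adapter))) * scaling
        return result


class FFContextSketch(nn.Module):
    # Hypothetical stand-in for transformer_blocks.17.ff_context.net:
    # net.0 = tanh-GELU projection 3072 -> 12288 (its proj is LoRA-wrapped),
    # net.1 = Dropout(p=0.0), net.2 = LoRA-wrapped Linear 12288 -> 3072.
    def __init__(self, dim=3072, inner_dim=12288, dtype=torch.bfloat16):
        super().__init__()
        self.proj = LoraLinearSketch(dim, inner_dim, dtype=dtype)
        self.drop = nn.Dropout(p=0.0)
        self.out = LoraLinearSketch(inner_dim, dim, dtype=dtype)

    def forward(self, hidden_states):
        hidden_states = F.gelu(self.proj(hidden_states), approximate="tanh")
        hidden_states = self.drop(hidden_states)
        return self.out(hidden_states)

Because values such as scaling['default_0'] == 1.0, the 'default_0' adapter key, and the rank-16 weight shapes are guarded as plain Python constants and exact tensor metadata, loading a LoRA with a different rank, scale, or adapter name would fail these guards and trigger another compile of this frame (the [0/1] prefix already marks a recompilation of frame 0, whose first compile was logged as [0/0]).
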
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['17']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=18 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[18] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[18] == '18' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) 
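[editor's note] The guard records above and below pin down a PEFT-wrapped Linear inside transformer_blocks['18'].norm1: the base layer, the 'default_0' lora_A/lora_B pair, scaling == 1.0, use_dora False, an empty merged_adapters list, and TENSOR_MATCH checks on the dtype, device, shape and stride of every parameter. As a rough, hedged sketch of the data path these guards protect (illustrative only; lora_linear_forward and the standalone modules below are invented for this note and are not PEFT's actual lora.Linear class), the forward referenced by the quoted peft/tuners/lora/layer.py lines amounts to:

    import torch
    import torch.nn as nn

    # Illustrative modules matching the guarded shapes for
    # transformer_blocks['18'].norm1.linear (bfloat16 on cuda:0 in the trace;
    # default dtype/CPU here to keep the sketch self-contained):
    base_layer = nn.Linear(3072, 18432)        # TENSOR_MATCH: weight [18432, 3072], bias [18432]
    lora_A = nn.Linear(3072, 16, bias=False)   # lora_A['default_0']: weight [16, 3072], bias is None
    lora_B = nn.Linear(16, 18432, bias=False)  # lora_B['default_0']: weight [18432, 16], bias is None
    dropout = nn.Identity()                    # lora_dropout['default_0'] (assumed to be a no-op here)
    scaling = 1.0                              # EQUALS_MATCH: scaling['default_0'] == 1.0

    def lora_linear_forward(x):
        # Mirrors the source lines quoted in the guard comments
        # (peft/tuners/lora/layer.py:497-509): base path plus low-rank update
        # for the single active adapter 'default_0'.
        result = base_layer(x)
        x = x.to(lora_A.weight.dtype)
        result = result + lora_B(lora_A(dropout(x))) * scaling
        return result

    emb = torch.randn(2, 3072)
    print(lora_linear_forward(emb).shape)  # torch.Size([2, 18432])

If any of these conditions stops holding at call time (a different adapter name, a merged adapter, a changed weight shape or dtype), the guards for frame [0/1] fail and Dynamo recompiles. Guard dumps like this one are typically obtained by enabling guard logging, e.g. TORCH_LOGS="guards" or torch._logging.set_logs(guards=True) in recent PyTorch releases. [end editor's note]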
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_hooks 
# value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].processor, 139846067919072) # return self.processor( 
# diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].__dict__) # norm_hidden_states = 
self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape[0], 
accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 
'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._active_adapter[0], 
accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # 
nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
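
All of the base_layer / lora_A / lora_B / lora_dropout / use_dora guards in this block trace the same few source lines of peft/tuners/lora/layer.py (488-509) that are quoted in the guard comments. The sketch below restates that forward path with the shapes seen here: a frozen 3072 -> 12288 base projection plus a rank-16 bf16 adapter keyed 'default_0'. It is a simplified stand-in, not the actual peft implementation; the nn.Identity dropout and the scaling value of 1.0 are assumptions consistent with this trace (1.0 is the value guarded for the sibling layer further down), and all class and variable names are made up.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        def __init__(self, in_features=3072, out_features=12288, r=16, adapter="default_0"):
            super().__init__()
            self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
            self.base_layer.requires_grad_(False)  # base weights are frozen in the trace
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # stand-in; peft uses Dropout or Identity
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)})
            self.scaling = {adapter: 1.0}
            self.use_dora = {adapter: False}
            self.active_adapters = [adapter]
            self.disable_adapters = False

        def forward(self, x):
            result = self.base_layer(x)                 # result = self.base_layer(x, *args, **kwargs)
            if self.disable_adapters:                   # if self.disable_adapters:
                return result
            for name in self.active_adapters:           # for active_adapter in self.active_adapters:
                lora_A, lora_B = self.lora_A[name], self.lora_B[name]
                dropout, scaling = self.lora_dropout[name], self.scaling[name]
                x = x.to(lora_A.weight.dtype)           # x = x.to(lora_A.weight.dtype)
                if not self.use_dora[name]:             # if not self.use_dora[active_adapter]:
                    result = result + lora_B(lora_A(dropout(x))) * scaling
            return result

    out = LoraLinearSketch()(torch.randn(2, 3072, dtype=torch.bfloat16))  # -> shape [2, 12288]

Every Python attribute read along that path (dict keys, list lengths, bools, parameter metadata) is what shows up as a guard in the dump around this point.
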
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 
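
Besides the tensor guards, each LoRA layer in this trace contributes a cluster of cheap Python-state guards: EQUALS_MATCH on the active adapter name 'default_0', ID_MATCH on boolean flags such as use_dora['default_0'] and _disable_adapters (the raw ids are process-specific, but these are identity checks against Python singletons), and emptiness checks on merged_adapters and the hook dicts. Mutating any of that state after the first compiled run fails a guard and triggers a recompile. A minimal, peft-free illustration of that effect, assuming torch._logging.set_logs(recompiles=True) is available in this build (TORCH_LOGS="recompiles" in the environment is the equivalent knob); the Toy module is purely hypothetical:

    import torch
    import torch.nn as nn

    torch._logging.set_logs(recompiles=True)  # assumption: 'recompiles' artifact exists in this build

    class Toy(nn.Module):
        # Stand-in for a LoRA layer: a boolean flag read in forward, like _disable_adapters.
        def __init__(self):
            super().__init__()
            self.lin = nn.Linear(8, 8)
            self.disable_adapters = False

        def forward(self, x):
            if self.disable_adapters:        # Dynamo guards on this bool via an identity check
                return self.lin(x)
            return self.lin(x) * 2.0

    base = Toy()
    compiled = torch.compile(base, backend="eager")  # backend="eager" keeps the demo CPU-only
    x = torch.randn(2, 8)
    compiled(x)                    # first call: graph compiled, guards installed
    base.disable_adapters = True   # mutate the guarded Python state on the original module
    compiled(x)                    # guard failure on the flag -> a recompile is logged

The same mechanism is why enabling/disabling or merging adapters, switching the active adapter, or registering hooks on these FLUX modules after the first compiled step causes fresh compilations.
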
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = 
module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
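
The KeyValueManager pairs above reconstruct the whole ff_context.net container: key '0' is a tanh-approximate GELU projection whose proj is the LoRA-wrapped 3072 -> 12288 Linear, key '1' is a Dropout with p=0.0, and key '2' (whose guards continue below) is the LoRA-wrapped 12288 -> 3072 output projection, iterated by the quoted "for module in self.net" loop in diffusers attention.py. A plain-nn stand-in for that structure follows; the LoRA wrappers are omitted for brevity and the class/variable names are made up rather than taken from diffusers.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class GELUProj(nn.Module):
        # Stand-in for the guarded net['0']: Linear projection followed by tanh-approximated GELU.
        def __init__(self, dim_in, dim_out, approximate="tanh"):
            super().__init__()
            self.proj = nn.Linear(dim_in, dim_out)
            self.approximate = approximate

        def forward(self, hidden_states):
            hidden_states = self.proj(hidden_states)                    # hidden_states = self.proj(hidden_states)
            return F.gelu(hidden_states, approximate=self.approximate)  # F.gelu(gate, approximate=self.approximate)

    # ModuleList keys '0', '1', '2' match the EQUALS_MATCH guards on _modules.keys() above.
    ff_context_net = nn.ModuleList([
        GELUProj(3072, 12288),
        nn.Dropout(p=0.0),
        nn.Linear(12288, 3072),
    ])

    def feed_forward(hidden_states):
        # for module in self.net: hidden_states = module(hidden_states)   (attention.py:1165-1166)
        for module in ff_context_net:
            hidden_states = module(hidden_states)
        return hidden_states

    out = feed_forward(torch.randn(2, 3072))  # -> shape [2, 3072]
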
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks'], accessed_by=DictGetItemGuardAccessor(single_transformer_blocks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks'], 96863792) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].scaling['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
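[editor note] The guards above pin down the LoRA decomposition Dynamo traced for proj_mlp: a frozen bfloat16 base Linear (weight [12288, 3072], bias [12288]), a trainable rank-16 lora_A (weight [16, 3072], with an ID_MATCH on its bias entry that is consistent with bias=False), plus the lora_B factor and per-adapter dropout guarded in the records that follow. Below is a minimal sketch of the forward path quoted in the guard comments (peft/tuners/lora/layer.py:497-509); the nn.Identity dropout, the float32/CPU tensors, and the scaling value (taken from the EQUALS_MATCH on the sibling LoRA layers' scaling['default_0'] == 1.0 later in this log) are simplifications, not the real peft objects.

import torch
import torch.nn as nn

# Sketch of the LoRA-wrapped Linear these guards describe (shapes from the
# TENSOR_MATCH records: base 3072 -> 12288, rank 16; the real parameters are
# bfloat16 on cuda:0, base frozen, LoRA factors trainable).
base_layer = nn.Linear(3072, 12288, bias=True).requires_grad_(False)
lora_A = nn.Linear(3072, 16, bias=False)
lora_B = nn.Linear(16, 12288, bias=False)
dropout = nn.Identity()        # stand-in; the guard only pins the module type
scaling = 1.0                  # assumed equal to the guarded scaling['default_0'] values

def proj_mlp_forward(x: torch.Tensor) -> torch.Tensor:
    result = base_layer(x)                     # result = self.base_layer(x, ...)   layer.py:497
    x = x.to(lora_A.weight.dtype)              # x = x.to(lora_A.weight.dtype)      layer.py:506
    return result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509

out = proj_mlp_forward(torch.randn(2, 3072))   # -> shape [2, 12288]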
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, 
device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 
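[editor note] The surrounding guards on _active_adapter, merged_adapters, _disable_adapters and use_dora exist because the traced forward branches on these attributes, so any change invalidates the compiled graph. The ID_MATCH entries compare object identity, which for flags like these is consistent with checking against singletons such as False (an inference, not stated in the log). A schematic restatement of the branch structure quoted in the guard comments, not peft's actual code:

disable_adapters = False            # if self.disable_adapters:                 layer.py:488
merged_adapters = []                # return bool(self.merged_adapters)         tuners_utils.py:455
active_adapter = "default_0"        # if isinstance(self.active_adapter, str):  tuners_utils.py:469
use_dora = {"default_0": False}     # if not self.use_dora[active_adapter]:     layer.py:508

# active_adapters normalizes a single adapter name into a list (tuners_utils.py:469)
active_adapters = [active_adapter] if isinstance(active_adapter, str) else active_adapter

if not disable_adapters and not merged_adapters:
    for adapter in active_adapters:            # for active_adapter in self.active_adapters:  layer.py:499
        assert not use_dora[adapter]           # the plain LoRA path (no DoRA) was traced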
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
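[editor note] The act_mlp guards fix the activation to GELU with approximate == 'tanh' (nn/modules/activation.py:734 as quoted), so the compiled kernel bakes in the tanh approximation. A quick check of what that flag computes; the closed-form expression is the standard tanh-GELU approximation, stated here from general knowledge rather than from the log:

import math
import torch
import torch.nn.functional as F

x = torch.randn(4, 12288)
y = F.gelu(x, approximate="tanh")   # return F.gelu(input, approximate=self.approximate)

# tanh approximation: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
y_ref = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x.pow(3))))
assert torch.allclose(y, y_ref, atol=1e-6)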
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 
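[editor note] Each TENSOR_MATCH above re-validates static properties of a parameter (class, dispatch keys, dtype, device, requires_grad, size, stride) before the cached graph is reused, and NO_TENSOR_ALIASING additionally requires the guarded tensors to be distinct. A rough hand-written equivalent for proj_out's base layer, purely illustrative and not Dynamo's check_tensor; CPU tensors stand in for the real cuda:0 Parameters:

import torch

weight = torch.zeros(3072, 15360, dtype=torch.bfloat16)
bias = torch.zeros(3072, dtype=torch.bfloat16)

def tensor_match(t, *, dtype, size, stride, requires_grad):
    # Static metadata the guard pins; a mismatch forces recompilation.
    return (t.dtype == dtype
            and tuple(t.size()) == tuple(size)
            and tuple(t.stride()) == tuple(stride)
            and t.requires_grad == requires_grad)

assert tensor_match(weight, dtype=torch.bfloat16, size=(3072, 15360),
                    stride=(15360, 1), requires_grad=False)
assert tensor_match(bias, dtype=torch.bfloat16, size=(3072,),
                    stride=(1,), requires_grad=False)
assert weight.data_ptr() != bias.data_ptr()   # crude stand-in for NO_TENSOR_ALIASING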
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward 
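[editor note] The attn guards fix heads == 24, and the quoted head_dim = inner_dim // attn.heads (attention_processor.py:1721) together with the [128]-element norm_q / norm_k weights in the next records is consistent with a head dimension of 128 and an inner dimension of 3072; the inner_dim value itself is inferred from these numbers, not stated directly in the log.

heads = 24              # EQUALS_MATCH on attn.heads
head_dim = 128          # size=[128] RMSNorm weights on norm_q / norm_k
inner_dim = heads * head_dim
assert inner_dim == 3072                  # matches the 3072-wide projections elsewhere
assert inner_dim // heads == head_dim     # head_dim = inner_dim // attn.heads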
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
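[editor note] norm_q and norm_k are guarded as RMSNorm-style modules with eps == 1e-06 and a [128]-element bfloat16 weight. The sketch below follows the two quoted lines (diffusers normalization.py:428 and :430); computing the variance as the mean of squares over the last dimension is the standard RMSNorm form and is an assumption beyond what the log shows, as is the float32 CPU weight used to keep the example self-contained.

import torch

eps = 1e-6
weight = torch.ones(128)        # real guard: bfloat16, size=[128], requires_grad=False

def rms_norm(hidden_states: torch.Tensor) -> torch.Tensor:
    variance = hidden_states.pow(2).mean(-1, keepdim=True)           # assumed
    hidden_states = hidden_states * torch.rsqrt(variance + eps)      # normalization.py:428
    if weight is not None:                                           # normalization.py:430
        hidden_states = hidden_states * weight
    return hidden_states

q = torch.randn(2, 24, 512, 128)   # per-head queries; last dim matches the weight
q = rms_norm(q)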
+- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward 
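[editor's note] For orientation, the guards in the span above all pin down attributes of a single PEFT LoRA-wrapped linear (`to_v` inside `single_transformer_blocks.0.attn`): one active adapter named 'default_0', scaling == 1.0, use_dora disabled, no merged adapters, and rank-16 bfloat16 lora_A/lora_B matrices around a 3072x3072 base projection with bias. The sketch below is a minimal stand-in, not the actual peft implementation: the class name `LoraLinearSketch`, the `nn.Identity` used for lora_dropout, and the reading of the ID_MATCH guards on the lora biases and the use_dora flag as "None" and "False" are assumptions; the forward body just mirrors the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509), which is the path torch.compile is specializing on here.

import torch
import torch.nn as nn


class LoraLinearSketch(nn.Module):
    """Hypothetical stand-in for the guarded LoRA linear; shapes follow the log."""

    def __init__(self, in_features=3072, out_features=3072, r=16, adapter="default_0"):
        super().__init__()
        # Base projection: weight [3072, 3072] plus bias [3072], as in the TENSOR_MATCH guards.
        self.base_layer = nn.Linear(in_features, out_features, bias=True)
        # lora_A [16, 3072] and lora_B [3072, 16]; bias=False registers a None bias,
        # which is consistent with the ID_MATCH guards on the bias parameters.
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False)})
        # The guarded lora_dropout module is opaque in the log; Identity stands in here.
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}        # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}     # ID_MATCH on use_dora['default_0'] read as False
        self._active_adapter = [adapter]     # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self._disable_adapters = False
        self.merged_adapters = []            # LENGTH_CHECK: not merged_adapters

    def forward(self, x):
        # Mirrors the lines quoted in the guard comments: base projection first,
        # then one additive low-rank update per active adapter.
        if self._disable_adapters or self.merged_adapters:
            # Simplified: with adapters disabled or merged, only the base path runs.
            return self.base_layer(x)
        result = self.base_layer(x)
        for active_adapter in self._active_adapter:
            if active_adapter not in self.lora_A.keys():
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x_cast = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:
                result = result + lora_B(lora_A(dropout(x_cast))) * scaling
        return result


if __name__ == "__main__":
    layer = LoraLinearSketch()
    out = layer(torch.randn(2, 3072))
    print(out.shape)  # torch.Size([2, 3072])

Every attribute read on that path (the scaling and use_dora dict lookups, the _active_adapter list, the adapter-keyed ModuleDicts, the weight and bias tensors) shows up above as its own guard entry, which is why a single LoRA-wrapped projection contributes dozens of lines to the tree. [end editor's note]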
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].processor, 139846067704112) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # 
diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=1 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[1] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] 
# peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'], 
accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in 
merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules) 
== 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | 
+- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in 
__getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in 
keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].processor, 139846067704880) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['1']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=2 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[2] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].scaling, 
accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
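The guards in this stretch mirror, step for step, the PEFT LoRA Linear forward quoted in their comments (peft/tuners/lora/layer.py:497-509). Below is a minimal, self-contained sketch of that code path using only the shapes these TENSOR_MATCH entries record for proj_mlp: a 3072->12288 base layer in bfloat16 and a rank-16 adapter named 'default_0' with bias-free lora_A/lora_B. The module layout is reconstructed from the guard tree, not copied from the PEFT source, and dtype/device are simplified to CPU float32.

import torch
import torch.nn as nn

base_layer = nn.Linear(3072, 12288)                                      # weight [12288, 3072], bias [12288]
lora_A = nn.ModuleDict({"default_0": nn.Linear(3072, 16, bias=False)})   # weight [16, 3072]
lora_B = nn.ModuleDict({"default_0": nn.Linear(16, 12288, bias=False)})  # weight [12288, 16]
lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})               # assumed Identity; the guard only pins the type
scaling = {"default_0": 1.0}                                             # EQUALS_MATCH pins this float
active_adapters = ["default_0"]                                          # _active_adapter is guarded to this one-element list

def lora_linear_forward(x):
    result = base_layer(x)                        # result = self.base_layer(x, *args, **kwargs)
    for adapter in active_adapters:
        if adapter not in lora_A.keys():          # if active_adapter not in self.lora_A.keys()
            continue
        A, B = lora_A[adapter], lora_B[adapter]
        dropout = lora_dropout[adapter]
        s = scaling[adapter]
        x_cast = x.to(A.weight.dtype)             # x = x.to(lora_A.weight.dtype)
        # non-DoRA branch; use_dora['default_0'] is ID_MATCHed to a constant, assumed False here
        result = result + B(A(dropout(x_cast))) * s
    return result

out = lora_linear_forward(torch.randn(1, 8, 3072))   # small stand-in sequence
print(out.shape)                                     # torch.Size([1, 8, 12288])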
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
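Several guards above are ID_MATCH checks (___check_obj_id) against fixed object ids rather than value comparisons: the bias slots of lora_A/lora_B, use_dora['default_0'], and _disable_adapters. Reading those ids as the CPython singletons None and False is an assumption (the log prints only the raw integers), but it fits the path being traced: the LoRA sub-layers are created without bias, and this is the plain non-DoRA, adapters-enabled branch. In plain Python the checks amount to:

import torch.nn as nn

lora_A_default = nn.Linear(3072, 16, bias=False)
assert lora_A_default.bias is None                 # ID_MATCH on ..._parameters['bias'] pins the None singleton (assumed)

use_dora = {"default_0": False}                    # assumed value behind ___check_obj_id(..., 7629920)
disable_adapters = False
assert use_dora["default_0"] is False              # identity against the cached object, not equality
assert disable_adapters is False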
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
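Taken together, the proj_mlp, act_mlp and proj_out guards cover the feed-forward half of the Flux single transformer block quoted in their comments (transformer_flux.py:89 and :98): project 3072->12288, apply GELU with approximate='tanh', then project 15360->3072 over the concatenation of the attention output and the MLP hidden states, scaled by a gate. A minimal sketch with those dimensions follows; the LoRA wrappers from the earlier sketch are omitted, and the concatenation order and gate shape are inferred from the quoted lines and the guarded weight sizes, so treat the exact wiring as approximate.

import torch
import torch.nn as nn

proj_mlp = nn.Linear(3072, 12288)        # base_layer weight [12288, 3072]
act_mlp = nn.GELU(approximate="tanh")    # EQUALS_MATCH: approximate == 'tanh'
proj_out = nn.Linear(15360, 3072)        # base_layer weight [3072, 15360]; 15360 = 3072 + 12288

def single_block_ffn(norm_hidden_states, attn_output, gate):
    mlp_hidden_states = act_mlp(proj_mlp(norm_hidden_states))            # transformer_flux.py:89
    hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=-1)  # assumed ordering
    return gate * proj_out(hidden_states)                                # transformer_flux.py:98

h = torch.randn(1, 8, 3072)                                  # small stand-in sequence
out = single_block_ffn(h, torch.randn(1, 8, 3072), gate=torch.ones(1, 1, 3072))
print(out.shape)                                             # torch.Size([1, 8, 3072])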
| | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- 
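The recurring DICT_CONTAINS: not ___dict_contains('forward', ...__dict__) guards assert that none of these modules has a per-instance forward attribute: Dynamo traced the class-level forward, and an instance-level override (as some wrapping or offloading utilities install) would bypass what was traced. In plain Python, with a hypothetical stand-in module:

import torch.nn as nn

proj_out = nn.Linear(15360, 3072)            # stand-in for the guarded module
assert "forward" not in proj_out.__dict__    # what the guard re-checks on every call
# Any later `proj_out.forward = wrapper` would flip this check and force a recompile.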
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
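Each TENSOR_MATCH (check_tensor(...)) re-validates a fixed bundle of weight attributes on every call of the compiled graph: parameter class, dispatch keys, dtype (torch.bfloat16 throughout), device index, requires_grad, size and stride. If any of those drifts, for example an adapter of a different rank is loaded or a weight leaves cuda:0, the guard fails and Dynamo recompiles. A short sketch of the checked attributes and of turning on recompile reporting, assuming this PyTorch build accepts these torch._logging flags:

import torch

torch._logging.set_logs(guards=True, recompiles=True)   # or TORCH_LOGS="guards,recompiles" (assumed spelling)

# The attribute bundle a TENSOR_MATCH pins, shown on a stand-in for the proj_out base weight:
w = torch.empty(3072, 15360, dtype=torch.bfloat16)
assert (w.dtype, tuple(w.shape), w.stride(), w.requires_grad) == \
       (torch.bfloat16, (3072, 15360), (15360, 1), False)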
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
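The separate lora_A and lora_B weights, the scaling dict and the adapter bookkeeping are all guarded individually because the adapter stays a live set of submodules at compile time. Arithmetically, what they contribute to the guarded proj_out layer is a low-rank update W + scaling * (B @ A), which is also why fusing an adapter into the base weights beforehand (where acceptable) leaves far less for the guards to track. A self-contained check of that identity with the proj_out shapes, in float32 with random stand-in values:

import torch

W_base = torch.randn(3072, 15360)      # base_layer weight shape from the guard
A = torch.randn(16, 15360) * 0.01      # lora_A weight (scaled down as a stand-in)
B = torch.randn(3072, 16) * 0.01       # lora_B weight
scaling = 1.0                          # guarded value for 'default_0'

W_fused = W_base + (B @ A) * scaling   # one Linear weight, no adapter modules left to guard
x = torch.randn(2, 15360)
lhs = x @ W_fused.T
rhs = x @ W_base.T + (x @ A.T @ B.T) * scaling
assert torch.allclose(lhs, rhs, rtol=1e-3, atol=1e-2)   # loose tolerance for accumulation-order differences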
[__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
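The guards that follow (the attn entries below) pin this block's attention geometry and normalization: attn.heads == 24, and norm_q / norm_k are RMS norms with eps == 1e-06 and a 128-element weight, i.e. one scale per head channel, consistent with head_dim = inner_dim // attn.heads = 3072 // 24 = 128 from attention_processor.py:1721. A minimal sketch of the normalization written from the lines quoted in the guards (normalization.py:428 and :430); the variance step is the standard RMS computation and is an assumption, since it is not itself quoted in the log.

import torch

def rms_norm(hidden_states, weight, eps=1e-6):
    variance = hidden_states.float().pow(2).mean(-1, keepdim=True)       # assumed standard RMS variance
    hidden_states = hidden_states * torch.rsqrt(variance + eps)          # normalization.py:428
    if weight is not None:                                               # normalization.py:430
        hidden_states = hidden_states * weight
    return hidden_states

heads, inner_dim = 24, 3072                      # attn.heads == 24 (EQUALS_MATCH)
head_dim = inner_dim // heads                    # 128, as in attention_processor.py:1721
query = torch.randn(1, heads, 64, head_dim)      # hypothetical short sequence
norm_q_weight = torch.ones(head_dim)             # guarded weight size [128]
print(rms_norm(query, norm_q_weight).shape)      # torch.Size([1, 24, 64, 128])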
[0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'], 
accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
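
The norm_q and norm_k guards above pin eps == 1e-06 and a bfloat16 weight parameter of shape [128] for the query/key normalization layers. Below is a minimal sketch of the guarded forward, reconstructed only from the source lines quoted in the guard comments (diffusers/src/diffusers/models/normalization.py:428-430); the class name, constructor arguments, and the float32 variance computation are assumptions, not taken from this log.

import torch
import torch.nn as nn

class RMSNormSketch(nn.Module):  # hypothetical name standing in for the guarded norm module
    def __init__(self, dim: int = 128, eps: float = 1e-6, elementwise_affine: bool = True):
        super().__init__()
        self.eps = eps  # EQUALS_MATCH above: .eps == 1e-06
        # TENSOR_MATCH above: _parameters['weight'] is a bfloat16 Parameter of size [128]
        self.weight = nn.Parameter(torch.ones(dim, dtype=torch.bfloat16)) if elementwise_affine else None

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # assumed: variance over the last dimension, computed in float32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        # quoted at normalization.py:428: hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
        hidden_states = hidden_states.to(torch.float32) * torch.rsqrt(variance + self.eps)
        # quoted at normalization.py:430: if self.weight is not None:
        if self.weight is not None:
            hidden_states = hidden_states.to(self.weight.dtype) * self.weight
        return hidden_states

# e.g. q = RMSNormSketch()(torch.randn(2, 4096, 24, 128, dtype=torch.bfloat16))
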
L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
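
The to_q guard tree above, and the to_k / to_v trees that follow the same pattern below, all trace the forward of the same PEFT LoRA wrapper around a Linear layer. The following sketch is assembled only from the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509) and the guarded values (rank-16 bfloat16 adapters on a 3072x3072 base Linear, scaling == 1.0, single active adapter 'default_0', DoRA disabled); the class name, constructor arguments, and the nn.Identity dropout stand-in are assumptions, not the actual peft implementation.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):  # hypothetical stand-in for the guarded peft lora Linear wrapper
    def __init__(self, in_features: int = 3072, out_features: int = 3072, r: int = 16):
        super().__init__()
        dt = torch.bfloat16
        # TENSOR_MATCH: base weight [3072, 3072] and bias [3072], bf16, requires_grad=False
        self.base_layer = nn.Linear(in_features, out_features, bias=True, dtype=dt)
        # TENSOR_MATCH: lora_A weight [16, 3072] and lora_B weight [3072, 16], bf16, requires_grad=True, bias is None
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False, dtype=dt)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False, dtype=dt)})
        # stand-in: the guard only pins the dropout submodule's type id, not its class
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
        self.scaling = {"default_0": 1.0}        # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}     # ID_MATCH: use_dora['default_0'] is False
        self.active_adapters = ["default_0"]     # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self.disable_adapters = False            # ID_MATCH: _disable_adapters is False

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.disable_adapters:                          # layer.py:488 (merge/unmerge handling omitted)
            return self.base_layer(x)
        result = self.base_layer(x)                        # layer.py:497
        for active_adapter in self.active_adapters:        # layer.py:499
            if active_adapter not in self.lora_A.keys():   # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]           # layer.py:503
            dropout = self.lora_dropout[active_adapter]    # layer.py:504
            scaling = self.scaling[active_adapter]         # layer.py:505
            x = x.to(lora_A.weight.dtype)                  # layer.py:506
            if not self.use_dora[active_adapter]:          # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

# e.g. y = LoraLinearSketch()(torch.randn(1, 4096, 3072, dtype=torch.bfloat16))
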
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].processor, 139846067705648) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=3 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[3] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[3] == '3' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].scaling) == 1 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in 
active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward 
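The guard entries above for proj_out, to_q, and the other LoRA-wrapped Linear modules all point at the same handful of source lines in peft/tuners/lora/layer.py (roughly 488-509). The sketch below is a minimal, self-contained stand-in for that hot path, assembled from the fragments quoted in the guard comments and the shapes/constants reported by TENSOR_MATCH and EQUALS_MATCH; it is an illustrative reconstruction, not the PEFT implementation, and class/attribute names other than the quoted ones are assumptions.

import torch
import torch.nn as nn

# Illustrative stand-in for the guarded LoRA Linear forward path
# (shapes and constants taken from the guard log above; not PEFT source).
class TinyLoraLinear(nn.Module):
    def __init__(self, in_features=3072, out_features=3072, r=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)            # base weight/bias, TENSOR_MATCH, requires_grad=False in the log
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False)})   # [16, 3072] (to_q) / [16, 15360] (proj_out)
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False)})  # [3072, 16]; bias is ID_MATCHed to a constant (None here)
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})   # dropout module guarded only by type at inference
        self.scaling = {"default_0": scaling}                             # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}                              # ID_MATCH pins use_dora['default_0'] to a constant (False on this run)
        self._active_adapter = ["default_0"]                              # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self._disable_adapters = False                                    # ID_MATCH on _disable_adapters

    def forward(self, x):
        result = self.base_layer(x)                                       # result = self.base_layer(x, *args, **kwargs)
        for active_adapter in self._active_adapter:                       # for active_adapter in self.active_adapters:
            if active_adapter not in self.lora_A.keys():                  # if active_adapter not in self.lora_A.keys():
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                          # lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]                   # dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]                        # scaling = self.scaling[active_adapter]
            x = x.to(lora_A.weight.dtype)                                 # x = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:                         # if not self.use_dora[active_adapter]:
                result = result + lora_B(lora_A(dropout(x))) * scaling    # the guarded hot path (layer.py:509)
        return result

# e.g. the to_q case in the log: in=out=3072, r=16
block = TinyLoraLinear()
print(block(torch.randn(2, 3072)).shape)   # torch.Size([2, 3072])

These [__guards] records come from Dynamo's verbose guard logging; on recent PyTorch builds that channel is usually toggled with the TORCH_LOGS environment variable (for example TORCH_LOGS="+dynamo,guards") or torch._logging.set_logs(guards=True). Treat the exact flag spelling as an assumption against this particular PyTorch build.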
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward 
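The TENSOR_MATCH entries are the most specific guards here: for the to_k lora_A weight they fix the Parameter type, dispatch keys, torch.bfloat16, device 0, requires_grad=True, size [16, 3072] and stride [3072, 1]. A small illustrative check of those same properties (the device string is an assumption picked at runtime, not something the log specifies):

    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"
    w = torch.nn.Parameter(torch.empty(16, 3072, dtype=torch.bfloat16, device=device))

    # The same facts the TENSOR_MATCH guard records for this parameter:
    assert isinstance(w, torch.nn.Parameter)
    assert w.dtype == torch.bfloat16
    assert w.requires_grad
    assert tuple(w.shape) == (16, 3072)
    assert w.stride() == (3072, 1)

Swapping in an adapter with a different rank, dtype, or requires_grad flag would fail this guard and force a recompile.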
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) 
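For reference, a dump of this kind is normally produced by enabling the "guards" logging artifact, either through the TORCH_LOGS environment variable (for example TORCH_LOGS="guards,recompiles") or programmatically. A minimal sketch, assuming a recent PyTorch 2.x torch._logging API:

    import torch
    import torch._logging

    # Enable the [__guards] artifact seen throughout this log, plus recompile reasons.
    torch._logging.set_logs(guards=True, recompiles=True)

    @torch.compile
    def double(x):
        return x * 2

    double(torch.randn(4))  # prints the (much smaller) guard tree for this tiny graph

The tree printed for a toy function has the same structure as the one in this log, just without the deeply nested _modules paths.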
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].processor, 139846067706416) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_hooks 
# attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=4 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[4] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[4] == '4' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) 
# nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
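The source comments attached to the guards above repeatedly quote the PEFT LoRA forward path (peft/tuners/lora/layer.py:488-509): the base linear runs first, then each active adapter adds lora_B(lora_A(dropout(x))) * scaling unless DoRA is enabled. Below is a minimal sketch of that control flow, assuming the attribute names and the 'default_0' adapter key shown in the guards and the shapes from the nearby TENSOR_MATCH entries (rank 16 against 3072-wide projections); it is an approximation for orientation, not the actual peft implementation. Guard dumps like this one are typically obtained with TORCH_LOGS="guards" or torch._logging.set_logs(guards=True).

# Hypothetical, simplified sketch of the LoRA Linear forward these guards
# specialize on; attribute names follow the guard sources, shapes follow the
# TENSOR_MATCH entries above. Not the real peft implementation.
import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    def __init__(self, in_features=3072, out_features=9216, r=16, adapter="default_0"):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)                         # guarded via _modules['base_layer']
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False)})  # weight [16, 3072], bias is None
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False)}) # weight [9216, 16], bias is None
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}                   # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}                # ID_MATCH on use_dora['default_0']
        self._active_adapter = [adapter]                # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self._disable_adapters = False                  # ID_MATCH on _disable_adapters
        self.merged_adapters = []                       # LENGTH_CHECK: not merged_adapters

    def forward(self, x):
        if self._disable_adapters:                      # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                     # layer.py:497
        for active_adapter in self._active_adapter:     # layer.py:499
            lora_A = self.lora_A[active_adapter]        # layer.py:500-503
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]      # layer.py:505
            x = x.to(lora_A.weight.dtype)               # layer.py:506
            if not self.use_dora[active_adapter]:       # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result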
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys 
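
The guards above and below this point all specialize on the Python-level state of the PEFT LoRA wrapper around proj_mlp: the adapter key must be 'default_0', scaling['default_0'] must equal 1.0, use_dora must be False, merged_adapters must stay empty, and the lora_A/lora_B sub-layers must keep their types and (absent) biases. Below is a hedged sketch of the forward path those guards protect, reconstructed only from the source comments Dynamo prints next to each guard (peft/tuners/lora/layer.py:488-509); the class name, constructor, and the direct active_adapters list are illustrative simplifications, not the actual peft implementation.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Illustrative stand-in for the guarded LoRA linear layer; attribute names
        # mirror the ones the guards read, the rest is assumed for the sketch.
        def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
            super().__init__()
            self.base_layer = base_layer
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # stand-in for the dropout module
            self.scaling = {adapter: 1.0}        # guarded: EQUALS_MATCH scaling['default_0'] == 1.0
            self.use_dora = {adapter: False}     # guarded: ID_MATCH on the bool object
            self.active_adapters = [adapter]     # in peft this is a property over _active_adapter; simplified here
            self.disable_adapters = False        # guarded: ID_MATCH on the bool object
            self.merged_adapters = []            # guarded: LENGTH_CHECK, must stay empty

        def forward(self, x, *args, **kwargs):
            if self.disable_adapters:                          # layer.py:488
                return self.base_layer(x, *args, **kwargs)
            result = self.base_layer(x, *args, **kwargs)       # layer.py:497
            for active_adapter in self.active_adapters:        # layer.py:499
                if active_adapter not in self.lora_A.keys():   # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]           # layer.py:503
                dropout = self.lora_dropout[active_adapter]    # layer.py:504
                scaling = self.scaling[active_adapter]         # layer.py:505
                x = x.to(lora_A.weight.dtype)                  # layer.py:506
                if not self.use_dora[active_adapter]:          # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result

    # Usage with the guarded proj_mlp shape (3072 -> 12288):
    layer = LoraLinearSketch(nn.Linear(3072, 12288))
    out = layer(torch.randn(2, 3072))
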
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) 
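
Every check in this subtree is an attribute-level guard, so mutating any of that state after compilation (changing the LoRA scaling, activating or merging another adapter, toggling use_dora or disable_adapters, or registering forward/backward hooks) fails a guard and forces a recompile; the [0/1] tag on these records already marks the second compiled version of frame 0. A minimal way to surface the same guard and recompile output, assuming a recent PyTorch 2.x where the 'guards' and 'recompiles' logging artifacts exist (TORCH_LOGS="guards,recompiles" in the environment is the equivalent switch):

    import torch

    # Print the TREE_GUARD_MANAGER for each compiled frame and log recompile reasons.
    torch._logging.set_logs(guards=True, recompiles=True)

    def f(x):
        return torch.sin(x) + 1

    compiled = torch.compile(f)
    compiled(torch.randn(8))      # first call compiles and dumps the guard tree for this frame
    compiled(torch.randn(8, 8))   # different rank fails a shape guard and logs a recompile
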
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters 
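
Alongside the attribute guards, each parameter gets a TENSOR_MATCH that pins its class, dispatch keys, dtype (torch.bfloat16 here), device index, shape, stride, and requires_grad, so replacing a weight with a different dtype or shape, or moving the module to another device, also invalidates the cached graph. Taken together, the quoted diffusers lines trace the single transformer block's MLP path; the sketch below re-creates it with the guarded shapes, where the concatenation feeding proj_out is an assumption implied by its 15360-wide input rather than something quoted in this log:

    import torch
    import torch.nn as nn

    dim, mlp_dim = 3072, 12288                  # from the guarded weight shapes
    proj_mlp = nn.Linear(dim, mlp_dim)          # base_layer weight [12288, 3072]
    act_mlp = nn.GELU(approximate="tanh")       # guarded: approximate == 'tanh'
    proj_out = nn.Linear(dim + mlp_dim, dim)    # base_layer weight [3072, 15360]

    norm_hidden_states = torch.randn(1, 16, dim)
    attn_output = torch.randn(1, 16, dim)       # stand-in for the block's attention output
    gate = torch.randn(1, 1, dim)

    mlp_hidden_states = act_mlp(proj_mlp(norm_hidden_states))            # transformer_flux.py:89
    hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=-1)  # assumed concat, implied by proj_out's input width
    hidden_states = gate * proj_out(hidden_states)                       # transformer_flux.py:98
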
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._backward_pre_hooks # 
hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in 
__getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in 
keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].processor, 139846067707184) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=5 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[5] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[5] == '5' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ 
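
The norm guards a few records up pin an affine-free LayerNorm (eps == 1e-06, normalized_shape == (3072,), weight and bias matched against None) feeding the modulation quoted from diffusers normalization.py:169-171. A rough sketch of that path, with illustrative batch and token counts and stand-in tensor names, could look like this:

import torch
import torch.nn as nn
import torch.nn.functional as F

# Rough sketch of the AdaLayerNormZero-style path the guards quote
# (normalization.py:169-171):
#   emb = self.linear(self.silu(emb))
#   shift_msa, scale_msa, gate = emb.chunk(3, dim=1)
#   x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
# Sizes mirror the guards: the LoRA-wrapped linear maps 3072 -> 3 * 3072 = 9216,
# the LayerNorm is eps=1e-6 over (3072,) with no learnable affine parameters.
hidden_states = torch.randn(1, 32, 3072)  # (batch, tokens, dim), illustrative sizes
temb = torch.randn(1, 3072)

linear = nn.Linear(3072, 3 * 3072)        # stands in for the guarded 'norm.linear'
norm = nn.LayerNorm(3072, eps=1e-6, elementwise_affine=False)

emb = linear(F.silu(temb))
shift_msa, scale_msa, gate = emb.chunk(3, dim=1)
norm_hidden_states = norm(hidden_states) * (1 + scale_msa[:, None]) + shift_msa[:, None]
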
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].__dict__) # hidden_states = gate * 
self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].heads, 
accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 
97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in 
self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].processor, 139846067507312) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=6 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[6] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[6] == '6' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
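The adapter-related guards above on attn.to_v (and the matching ones further down on block '6''s norm.linear and proj_mlp) all trace the unmerged-adapter path of PEFT's LoRA Linear.forward, as the source lines quoted in the guard comments indicate (peft/tuners/lora/layer.py:488-509, peft/tuners/tuners_utils.py:455 and 469): disable_adapters is false, merged_adapters is empty, the single active adapter is 'default_0', its scaling is 1.0 and use_dora is false. The following is a rough sketch of that code path, reconstructed only from those quoted lines; the class name, attribute layout and the Identity dropout are illustrative simplifications, not PEFT's actual implementation.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Illustrative layout mirroring the guarded attributes; not PEFT's real class."""
    def __init__(self, base_layer: nn.Linear, rank: int = 16, scale: float = 1.0):
        super().__init__()
        self.base_layer = base_layer                        # _modules['base_layer'] (frozen bf16 weights in the log)
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, rank, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(rank, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # stand-in for whatever dropout module is configured
        self.scaling = {"default_0": scale}                 # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}                # ID_MATCH on use_dora['default_0']
        self.active_adapters = ["default_0"]                # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self.disable_adapters = False                       # ID_MATCH on _disable_adapters
        self.merged_adapters = []                           # LENGTH_CHECK: not merged_adapters

    def forward(self, x):
        if self.disable_adapters or self.merged_adapters:   # layer.py:488 / tuners_utils.py:455 (both falsy here; real PEFT handles unmerging)
            return self.base_layer(x)
        result = self.base_layer(x)                         # layer.py:497
        for active_adapter in self.active_adapters:         # layer.py:499
            if active_adapter not in self.lora_A.keys():    # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]            # layer.py:503
            dropout = self.lora_dropout[active_adapter]     # layer.py:504
            scaling = self.scaling[active_adapter]          # layer.py:505
            x = x.to(lora_A.weight.dtype)                   # layer.py:506
            if not self.use_dora[active_adapter]:           # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

# e.g. block '6' norm.linear as guarded further down: base 3072 -> 9216 with a rank-16 adapter
# lin = LoraLinearSketch(nn.Linear(3072, 9216, dtype=torch.bfloat16))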
[0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules) == 6 # result = 
self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
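The block-'6' norm guards above follow diffusers' AdaLayerNormZeroSingle, per the quoted normalization.py:169/171 lines: emb = self.linear(self.silu(emb)) followed by x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]. The guarded shapes fit that reading: the (LoRA-wrapped) linear maps 3072 to 9216, i.e. 3 x 3072, one chunk each for shift, scale and gate (the three-way split is inferred from the output width and from the "norm_hidden_states, gate = self.norm(hidden_states, emb=temb)" unpacking at transformer_flux.py:88, not shown verbatim in the guards), and the inner LayerNorm has eps=1e-06, normalized_shape (3072,) and no affine parameters, consistent with the ID_MATCH guards on its weight/bias. A compact sketch under those assumptions, with a plain nn.Linear standing in for the LoRA-wrapped one:

import torch.nn as nn

class AdaNormZeroSingleSketch(nn.Module):
    """Shapes taken from the guards: linear weight [9216, 3072], LayerNorm over 3072 with eps 1e-6."""
    def __init__(self, dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()                                   # silu.inplace is guarded above
        self.linear = nn.Linear(dim, 3 * dim)                   # LoRA-wrapped in the guarded model
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)

    def forward(self, x, emb):
        emb = self.linear(self.silu(emb))                       # normalization.py:169
        shift_msa, scale_msa, gate_msa = emb.chunk(3, dim=1)    # inferred 3-way split of the 9216-wide output
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # normalization.py:171
        return x, gate_msa

# x: [batch, seq, 3072], emb (temb): [batch, 3072]; returns the modulated states plus the gate
# that transformer_flux.py:88 unpacks as "norm_hidden_states, gate = self.norm(hidden_states, emb=temb)".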
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules) == 1 # 
return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | 
| | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in 
active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | 
| | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) 
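For to_v, the TENSOR_MATCH guards above pin down the full parameter layout: a frozen bf16 3072x3072 base weight and 3072 bias (requires_grad=False) plus trainable rank-16 adapter factors of shape [16, 3072] and [3072, 16] (requires_grad=True), all on device 0. A small illustration of that layout follows; the variable names are hypothetical and a CUDA device is assumed, as in the log - only the shapes, dtypes, and grad flags are taken from the check_tensor entries.

```python
# Illustration only: the parameter layout the check_tensor guards fix for to_v.
import torch

base_weight   = torch.empty(3072, 3072, dtype=torch.bfloat16, device="cuda", requires_grad=False)
base_bias     = torch.empty(3072,       dtype=torch.bfloat16, device="cuda", requires_grad=False)
lora_A_weight = torch.empty(16, 3072,   dtype=torch.bfloat16, device="cuda", requires_grad=True)
lora_B_weight = torch.empty(3072, 16,   dtype=torch.bfloat16, device="cuda", requires_grad=True)

# check_tensor guards these properties (dtype, device, shape, stride, requires_grad);
# if any of them changes between calls, the guard fails and the frame is recompiled.
for name, t in [("base.weight", base_weight), ("base.bias", base_bias),
                ("lora_A.weight", lora_A_weight), ("lora_B.weight", lora_B_weight)]:
    print(name, tuple(t.shape), t.dtype, t.requires_grad)
```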
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].processor, 139846067508080) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__, 
accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=7 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[7] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[7] == '7' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | 
| | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
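The proj_out guards above all protect the single LoRA code path that the records cite (peft/tuners/lora/layer.py:488-509): run the frozen base Linear, then add the rank-16 update lora_B(lora_A(dropout(x))) * scaling, with disable_adapters False, use_dora False and no merged adapters. Below is a minimal sketch of that guarded path using the shapes and values pinned by the guards (in_features 15360, out_features 3072, rank 16, scaling['default_0'] == 1.0, bf16, frozen base weights); it is an illustration of the cited lines, not the PEFT implementation itself, and the class name is made up.

```python
import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Illustrative stand-in for the guarded proj_out LoRA layer (single 'default_0' adapter)."""

    def __init__(self, in_features=15360, out_features=3072, r=16, scaling=1.0):
        super().__init__()
        # Frozen base layer: the guards pin weight [3072, 15360] and bias [3072],
        # bf16, requires_grad=False.
        self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
        self.base_layer.requires_grad_(False)
        # Trainable low-rank factors: lora_A weight [16, 15360], lora_B weight [3072, 16];
        # both are bias-free (their bias slots are ID_MATCHed to a constant, consistent with bias=None).
        self.lora_A = nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)
        self.lora_B = nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)
        self.lora_dropout = nn.Identity()  # stand-in for lora_dropout['default_0'] (a no-op at inference)
        self.scaling = scaling             # scaling['default_0'] == 1.0 in the guards

    def forward(self, x):
        result = self.base_layer(x)                         # layer.py:497 in the guard comments
        x = x.to(self.lora_A.weight.dtype)                  # layer.py:506
        # layer.py:509: low-rank update added on top of the frozen projection
        return result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling
```

The to_q and to_k subtrees that follow repeat the same block of checks with their own shapes, which is why the dump re-emits this pattern once per LoRA-wrapped Linear.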
L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
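The norm_q and norm_k guards just above pin an RMS-style norm with eps == 1e-06 and a single bf16 weight of size [128] (head_dim = 3072 // 24 heads, per the heads guard), and they quote the normalization they protect (diffusers/src/diffusers/models/normalization.py:428-430). A rough sketch of that step under those shapes, assuming the usual mean-of-squares formulation; the helper name and the sequence length are made up for illustration:

```python
from typing import Optional

import torch

def rms_norm_sketch(x: torch.Tensor, weight: Optional[torch.Tensor], eps: float = 1e-6) -> torch.Tensor:
    """RMS normalization over the last (head_dim) axis, mirroring the guarded rsqrt(variance + eps) step."""
    variance = x.float().pow(2).mean(dim=-1, keepdim=True)  # mean of squares over head_dim
    x = x * torch.rsqrt(variance + eps)                     # normalization.py:428 in the guard comments
    if weight is not None:                                  # normalization.py:430: optional learned gain
        x = x.to(weight.dtype) * weight
    return x

# Shapes implied by the guards: 24 heads, head_dim 128, bf16 gain of size [128].
q = torch.randn(1, 24, 512, 128, dtype=torch.bfloat16)   # 512 is an arbitrary sequence length
q_normed = rms_norm_sketch(q, torch.ones(128, dtype=torch.bfloat16))
```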
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].processor, 139846067508848) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=8 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[8] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[8] == '8' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8'].__dict__) # hidden_states = block( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # 
peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].merged_adapters, 
accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + 
scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape) == 1 # 
return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 
3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * 
scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], 
accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in 
__call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 
97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in 
self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].processor, 139846067509616) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=9 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[9] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[9] == '9' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = 
self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].scaling['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, 
device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward 
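For reference, the guard entries in this block all trace back to the PEFT LoRA forward path quoted in their own provenance comments (peft/tuners/lora/layer.py:488-509) wrapped around a plain nn.Linear (nn/modules/linear.py:125). Below is a minimal, self-contained sketch of that path, assembled only from the snippets and values these guards record (single adapter 'default_0', rank 16, 3072-wide projection, scaling 1.0, use_dora False, no merged adapters); it is an illustrative stand-in, not PEFT's actual LoraLayer class, and it stays on CPU/float32 so it runs without a GPU even though the guarded model holds bfloat16 CUDA parameters.

import torch
import torch.nn as nn

class LoRALinearSketch(nn.Module):
    # Hypothetical stand-in for the guarded PEFT LoRA Linear wrapper (names/shapes taken from the guards above).
    def __init__(self, in_features=3072, out_features=3072, rank=16, adapter="default_0"):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)
        self.base_layer.requires_grad_(False)                              # base weight/bias guarded with requires_grad=False
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, rank, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(rank, out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})        # stand-in; the log only TYPE_MATCHes this module
        self.scaling = {adapter: 1.0}                                      # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}                                   # ID_MATCH on use_dora['default_0']
        self.active_adapters = [adapter]                                   # _active_adapter == ['default_0']
        self.disable_adapters = False                                      # ID_MATCH on _disable_adapters
        self.merged_adapters = []                                          # LENGTH_CHECK: not merged_adapters

    def forward(self, x):
        result = self.base_layer(x)                                        # layer.py:497
        if self.disable_adapters or self.merged_adapters:                  # layer.py:488 / tuners_utils.py:455
            return result
        for active_adapter in self.active_adapters:                        # layer.py:499
            if active_adapter not in self.lora_A:                          # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                           # layer.py:503
            dropout = self.lora_dropout[active_adapter]                    # layer.py:504
            scaling = self.scaling[active_adapter]                         # layer.py:505
            x_cast = x.to(lora_A.weight.dtype)                             # layer.py:506
            if not self.use_dora[active_adapter]:                          # layer.py:508
                result = result + lora_B(lora_A(dropout(x_cast))) * scaling  # layer.py:509
        return result

print(LoRALinearSketch()(torch.randn(2, 3072)).shape)                      # torch.Size([2, 3072])

Each Python-level read in that path (dict length, adapter key, scaling value, weight shape/dtype/requires_grad) is what appears in the log as a DICT_LENGTH, EQUALS_MATCH, ID_MATCH, or TENSOR_MATCH entry; if any of them changes between calls, the guard set fails and Dynamo recompiles the frame.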
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
+- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward 
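
Note: the block of guards above for to_q / to_k / to_v (and the near-identical blocks emitted for every other single_transformer_block) all trace the same PEFT LoRA forward path; Dynamo records a TYPE_MATCH, DICT_LENGTH, LENGTH_CHECK, ID_MATCH, EQUALS_MATCH or TENSOR_MATCH guard for each Python-level attribute it reads while inlining that forward. The sketch below is a minimal, self-contained reconstruction of that path, assembled only from the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509 and nn/modules/linear.py:125). The class name LoraLinearSketch, its constructor arguments, and the nn.Identity dropout placeholder are illustrative assumptions, not the verbatim PEFT implementation.

    # Hypothetical sketch of the guarded LoRA Linear forward (not actual PEFT code).
    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        def __init__(self, in_features=3072, out_features=3072, r=16, adapter="default_0"):
            super().__init__()
            # base_layer: frozen weight [3072, 3072] and bias [3072], as in the TENSOR_MATCH guards
            self.base_layer = nn.Linear(in_features, out_features)
            for p in self.base_layer.parameters():
                p.requires_grad_(False)            # guards show requires_grad=False on base weight/bias
            # lora_A [r, in], lora_B [out, r], both bias-free (ID_MATCH against None for their bias)
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # placeholder for the guarded dropout module
            self.scaling = {adapter: 1.0}          # EQUALS_MATCH: scaling['default_0'] == 1.0
            self.use_dora = {adapter: False}       # ID_MATCH on the False singleton
            self.merged_adapters = []              # LENGTH_CHECK: not merged_adapters
            self._active_adapter = [adapter]       # EQUALS_MATCH: _active_adapter[0] == 'default_0'
            self.disable_adapters = False          # ID_MATCH on the False singleton

        def forward(self, x):
            # result = self.base_layer(x, *args, **kwargs)   -> F.linear(input, self.weight, self.bias)
            result = self.base_layer(x)
            if self.disable_adapters or self.merged_adapters:
                return result
            for active_adapter in self._active_adapter:
                if active_adapter not in self.lora_A.keys():
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]
                dropout = self.lora_dropout[active_adapter]
                scaling = self.scaling[active_adapter]
                x = x.to(lora_A.weight.dtype)
                if not self.use_dora[active_adapter]:
                    # result = result + lora_B(lora_A(dropout(x))) * scaling
                    result = result + lora_B(lora_A(dropout(x))) * scaling
            return result

Every attribute access in this forward (scaling['default_0'], use_dora['default_0'], _active_adapter[0], the lora_A / lora_B parameters, the hook dicts, and so on) becomes one of the guards above, which is why the dump repeats essentially the same block of guards once per LoRA-wrapped projection in the model.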
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].processor, 139846067510384) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # 
diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=10 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[10] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[10] == '10' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
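The guards immediately around this point descend into the block's norm module: the cited source lines are diffusers/src/diffusers/models/normalization.py:169 (emb = self.linear(self.silu(emb))) and :171 (x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]), the LoRA-wrapped linear's base weight is pinned to shape [9216, 3072] (i.e. 3 * 3072) with a [9216] bias, and the inner LayerNorm is pinned to normalized_shape (3072,) and eps 1e-06, with weight and bias guarded via ID_MATCH rather than TENSOR_MATCH, consistent with elementwise_affine=False. A rough sketch of that modulation path, with the shapes taken from the guards and the names and chunk order treated as illustrative assumptions:

import torch
import torch.nn as nn

class AdaNormSingleSketch(nn.Module):
    # Rough stand-in for the guarded norm module of a single transformer block
    # (the real module's linear is additionally LoRA-wrapped, as the guards show).
    def __init__(self, dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(dim, 3 * dim)  # guarded base weight [9216, 3072], bias [9216]
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # normalized_shape (3072,)

    def forward(self, x: torch.Tensor, emb: torch.Tensor):
        emb = self.linear(self.silu(emb))                     # normalization.py:169
        shift_msa, scale_msa, gate_msa = emb.chunk(3, dim=1)  # assumed order of the 3*dim split
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # normalization.py:171
        return x, gate_msa  # transformer_flux.py:88 unpacks this as (norm_hidden_states, gate)

The TENSOR_MATCH guards here also fix dtype (torch.bfloat16), device 0 and exact sizes/strides, and the LoRA weights on this linear are rank 16 (lora_A weight [16, 3072], lora_B weight [9216, 16]); loading an adapter with a different rank or dtype should fail these guards and force another recompile.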
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = 
self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
+- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].processor, 139846067511152) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=11 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[11] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[11] == '11' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules) == 6 # 
result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in 
merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
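
For reference: the peft/tuners/lora/layer.py fragments quoted throughout the guard comments above (layer.py:488, 497, 499-509) all belong to the LoRA Linear forward path being traced. The following is a minimal stand-alone sketch of that path, assembled only from those quoted fragments; the function name, the explicit argument list and the example shapes are illustrative (the real method takes `self` and reads the per-adapter dicts guarded above), and the exact body in the installed peft version may differ.

    import torch
    import torch.nn as nn

    def lora_linear_forward(base_layer: nn.Linear,
                            lora_A: nn.Linear,
                            lora_B: nn.Linear,
                            dropout: nn.Module,
                            scaling: float,
                            x: torch.Tensor) -> torch.Tensor:
        # Non-DoRA, adapters-enabled, nothing-merged path, per the quoted lines:
        result = base_layer(x)                                   # layer.py:497  result = self.base_layer(x, *args, **kwargs)
        x = x.to(lora_A.weight.dtype)                            # layer.py:506
        result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
        return result

    # Example wiring that matches the shapes guarded for 'proj_mlp' above
    # (base weight [12288, 3072], lora_A [16, 3072], lora_B [12288, 16], scaling 1.0).
    # dtype/device are left at defaults here; in the log they are bfloat16 CUDA parameters.
    base   = nn.Linear(3072, 12288)
    lora_A = nn.Linear(3072, 16, bias=False)
    lora_B = nn.Linear(16, 12288, bias=False)
    out = lora_linear_forward(base, lora_A, lora_B, nn.Identity(), 1.0, torch.randn(2, 3072))

Each dict access on that path (`self.lora_A[...]`, `self.lora_B[...]`, `self.lora_dropout[...]`, `self.scaling[...]`, `self.use_dora[...]`, the `active_adapters` list) is what produces the TYPE_MATCH / DICT_LENGTH / EQUALS_MATCH guards above; the ID_MATCH guards on the `bias` entries of lora_A / lora_B most likely pin them to None, which would correspond to adapter linears created with bias=False.
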
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
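
The diffusers source lines quoted in this stretch of the log (normalization.py:169/171 and transformer_flux.py:88/89/98) describe the single-block data path that these modules feed. Below is a rough, self-contained sketch of that path reconstructed from those quoted lines and from the guarded shapes (LayerNorm(3072, eps=1e-6) with no affine parameters, proj_mlp 3072->12288, GELU(approximate='tanh'), proj_out weight [3072, 15360]); the attention branch, the LoRA wrappers and any residual handling in the real block are deliberately omitted here.

    import torch
    import torch.nn as nn

    dim, mlp_dim = 3072, 12288

    silu       = nn.SiLU()
    ada_linear = nn.Linear(dim, 3 * dim)                                # 'norm.linear' (out_features 9216 = shift/scale/gate)
    ada_norm   = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # 'norm.norm': weight/bias ID-matched above (presumably None)
    proj_mlp   = nn.Linear(dim, mlp_dim)                                # weight [12288, 3072]
    act_mlp    = nn.GELU(approximate="tanh")                            # EQUALS_MATCH: approximate == 'tanh'
    proj_out   = nn.Linear(dim + mlp_dim, dim)                          # weight [3072, 15360]

    def single_block(hidden_states, temb, attn_output):
        # AdaLayerNormZeroSingle (normalization.py:169/171)
        emb = ada_linear(silu(temb))
        shift_msa, scale_msa, gate = emb.chunk(3, dim=1)
        norm_hidden_states = ada_norm(hidden_states) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        # FluxSingleTransformerBlock (transformer_flux.py:89/98); attention itself is elided
        mlp_hidden_states = act_mlp(proj_mlp(norm_hidden_states))
        hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=-1)
        return gate.unsqueeze(1) * proj_out(hidden_states)

    y = single_block(torch.randn(1, 8, dim), torch.randn(1, dim), torch.randn(1, 8, dim))

The 15360-wide proj_out input (3072 + 12288) is what implies the concatenation of the attention output with the MLP hidden states before the final projection; the quoted transformer_flux.py:98 line then gates that projection with the `gate` chunk produced by the adaptive norm.
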
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora) == 
1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].processor, 139846066291424) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['11']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=12 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[12] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[12] == '12' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) 
== 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: 
# peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
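The norm_q guards (and the norm_k guards that follow the same pattern) resolve to an RMS-style normalization: eps == 1e-06, a single bfloat16 weight of size [128] (matching head_dim = 3072 // 24), and the quoted normalization.py:428/430 lines. A minimal sketch of what those two lines compute; the mean-square reduction over the last dimension and the float32 upcast are assumptions, not stated in this excerpt:

    import torch

    def rms_norm(hidden_states, weight=None, eps=1e-6):
        # Assumed: mean square over the channel (last) dimension, computed in float32.
        variance = hidden_states.float().pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + eps)   # normalization.py:428
        if weight is not None:                                        # normalization.py:430
            hidden_states = hidden_states.to(weight.dtype) * weight
        return hidden_states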
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
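Nearly every guard on to_q between the scaling check and the hook checks corresponds to one line of peft's LoRA Linear forward that the log quotes (peft/tuners/lora/layer.py:488-509 and tuners_utils.py:455/469). A condensed sketch of that control flow, assembled from the quoted lines; branch outcomes are inferred from which source lines appear in the trace, and the merged/disabled path is abbreviated:

    def lora_linear_forward(self, x, *args, **kwargs):
        if self.disable_adapters:                          # layer.py:488  (False on this trace)
            ...                                            # unmerge/bypass path elided
        result = self.base_layer(x, *args, **kwargs)       # layer.py:497  (frozen 3072x3072 Linear)
        for active_adapter in self.active_adapters:        # layer.py:499  (guarded == ['default_0'])
            if active_adapter not in self.lora_A.keys():   # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]           # assumed lookup, mirrors lora_B below
            lora_B = self.lora_B[active_adapter]           # layer.py:503
            dropout = self.lora_dropout[active_adapter]    # layer.py:504
            scaling = self.scaling[active_adapter]         # layer.py:505  (guarded == 1.0)
            x = x.to(lora_A.weight.dtype)                  # layer.py:506
            if not self.use_dora[active_adapter]:          # layer.py:508  (False on this trace)
                result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
        return result

Because each of these attribute reads is guarded, swapping adapters, merging weights, or toggling disable_adapters on this module would typically invalidate the compiled graph and trigger a recompile.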
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 
# return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 
'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].processor, 139846066292288) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | +- KeyValueManager pair at index=13 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[13] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[13] == '13' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 
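For orientation: the guards above pin down single_transformer_blocks.13.norm.linear, a PEFT LoRA-wrapped linear layer. The TENSOR_MATCH entries show a frozen bfloat16 base Linear with weight [9216, 3072] and bias [9216] (requires_grad=False, CUDA device 0), plus a trainable rank-16 adapter under the key 'default_0': lora_A weight [16, 3072] and lora_B weight [9216, 16], each with its bias slot pinned by ID_MATCH to a fixed singleton (consistent with bias=None). use_dora['default_0'], merged_adapters and _disable_adapters are likewise pinned so the plain, unmerged LoRA path quoted from peft/tuners/lora/layer.py is taken. Below is a minimal sketch of that path, with shapes taken from these guards; the scaling of 1.0 is assumed from the neighbouring proj_mlp/proj_out guards, the dropout is modeled as Identity, and the class name is illustrative, not the real peft class.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Sketch of the guarded LoRA linear: frozen base 3072 -> 9216 plus a
    # rank-16 adapter pair, mirroring the quoted peft forward
    #   result = self.base_layer(x)
    #   result = result + lora_B(lora_A(dropout(x))) * scaling
    def __init__(self, in_features=3072, out_features=9216, r=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
        self.base_layer.requires_grad_(False)              # requires_grad=False in the guards
        self.lora_A = nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)
        self.lora_B = nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)
        self.lora_dropout = nn.Identity()                  # stand-in for lora_dropout['default_0']
        self.scaling = scaling                             # scaling['default_0'] == 1.0 in this log

    def forward(self, x):
        result = self.base_layer(x)
        return result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling

x = torch.randn(2, 3072, dtype=torch.bfloat16)
print(LoraLinearSketch()(x).shape)                          # torch.Size([2, 9216])

Since peft typically computes scaling as lora_alpha / r, a scaling of 1.0 with r = 16 would correspond to lora_alpha = 16.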
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
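The surrounding guards for the block's norm describe an AdaLayerNormZeroSingle-style modulation: the quoted source lines are `emb = self.linear(self.silu(emb))` (diffusers normalization.py:169) and `x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]` (line 171), and the inner LayerNorm is pinned to eps == 1e-06 and normalized_shape == (3072,), with both weight and bias ID-matched to a singleton (consistent with elementwise_affine=False). Because the guarded linear outputs 9216 = 3 * 3072 values, the embedding is presumably split three ways into shift, scale and gate, with the gate reappearing later at transformer_flux.py:98. A rough sketch under those assumptions (names illustrative; only the shapes and the two quoted lines come from the log):

import torch
import torch.nn as nn
import torch.nn.functional as F

dim = 3072
linear = nn.Linear(dim, 3 * dim)                       # guarded base weight shape: [9216, 3072]
norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)

x = torch.randn(1, 16, dim)                            # hidden states (batch, tokens, dim)
emb = torch.randn(1, dim)                              # conditioning embedding (temb)

emb = linear(F.silu(emb))                              # emb = self.linear(self.silu(emb))
shift_msa, scale_msa, gate = emb.chunk(3, dim=1)       # assumed 3-way split, 9216 = 3 * 3072
x = norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
# `gate` is consumed later: hidden_states = gate * self.proj_out(hidden_states)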
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # 
result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
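A few entries above, the act_mlp guard fixes approximate == 'tanh' on the GELU (the quoted source is `return F.gelu(input, approximate=self.approximate)`), so swapping in the exact erf-based GELU would invalidate this guard and force a recompile. For reference, the tanh variant is the usual 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))) approximation; a quick self-check on a reasonably recent PyTorch:

import math
import torch
import torch.nn.functional as F

x = torch.linspace(-4, 4, steps=9)
tanh_gelu = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x**3)))
# F.gelu(..., approximate="tanh") implements the same approximation
assert torch.allclose(F.gelu(x, approximate="tanh"), tanh_gelu, atol=1e-5)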
[__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 
in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].processor, 139846066293056) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # 
diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=14 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[14] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[14] == '14' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
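The guards in this stretch of the tree all re-validate a single PEFT LoRA-wrapped Linear (the norm.linear of single_transformer_blocks['14']): the frozen bfloat16 base weight of size [9216, 3072], the rank-16 lora_A / lora_B pair under the 'default_0' adapter, scaling['default_0'] == 1.0, an empty merged_adapters list, and the use_dora / disable_adapters flags. The sketch below is a minimal, hypothetical reconstruction of the forward path quoted in the guard comments (peft/tuners/lora/layer.py:488-509); it is not the verbatim peft implementation. The nn.Identity dropout, the False booleans, and plain float32 CPU tensors are assumptions made so the example is self-contained (the guarded parameters are bfloat16 on CUDA). This kind of dump itself comes from guard logging, e.g. TORCH_LOGS="guards" or torch._logging.set_logs(guards=True).

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Hypothetical stand-in mirroring the guarded attributes; not peft's class."""
    def __init__(self, in_features=3072, out_features=9216, r=16,
                 adapter="default_0", scaling=1.0):
        super().__init__()
        # Frozen base projection, guarded above via TENSOR_MATCH as size=[9216, 3072]
        # (bfloat16, requires_grad=False on CUDA in the log; float32 CPU here).
        self.base_layer = nn.Linear(in_features, out_features, bias=True)
        self.base_layer.requires_grad_(False)
        # Rank-16 adapter: lora_A weight [16, 3072], lora_B weight [9216, 16],
        # both bias-free (their bias guards are ID_MATCH against a constant,
        # consistent with bias=None).
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # assumption
        self.scaling = {adapter: scaling}   # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}    # assumed False (boolean ID_MATCH)
        self._active_adapter = [adapter]    # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self._disable_adapters = False      # assumed False (boolean ID_MATCH)
        self.merged_adapters = []           # LENGTH_CHECK: empty list

    def forward(self, x):
        result = self.base_layer(x)                          # peft/tuners/lora/layer.py:497
        if self._disable_adapters or self.merged_adapters:   # layer.py:488 / tuners_utils.py:455
            return result
        for active_adapter in self._active_adapter:          # layer.py:499
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]              # layer.py:503
            dropout = self.lora_dropout[active_adapter]       # layer.py:504
            scaling = self.scaling[active_adapter]            # layer.py:505
            x = x.to(lora_A.weight.dtype)                     # layer.py:506
            if not self.use_dora[active_adapter]:             # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

if __name__ == "__main__":
    layer = LoraLinearSketch()
    print(layer(torch.randn(2, 3072)).shape)  # torch.Size([2, 9216])

On every call of the compiled region, Dynamo re-checks exactly these attributes (types, dict lengths, the 'default_0' key, tensor dtype/shape/stride) before reusing the cached graph; changing any of them, for example loading a second adapter or merging the LoRA weights into the base layer, would fail the guards and trigger a recompile.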
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = 
self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
+- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
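Every parameter the traced forward touches gets a TENSOR_MATCH guard pinning its Python class, dispatch key set, dtype, device, requires_grad, size and stride, plus a NO_TENSOR_ALIASING check across all guarded tensors. As a Python-level illustration only (the real check runs inside Dynamo's compiled guard manager), the properties listed above for to_q.base_layer.weight amount to something like:

import torch

def matches_logged_weight_guard(t: torch.Tensor) -> bool:
    # Mirrors the fields printed by check_tensor(...) for the frozen bf16 [3072, 3072] weight.
    return (
        isinstance(t, torch.nn.Parameter)
        and t.dtype == torch.bfloat16
        and t.device == torch.device("cuda", 0)
        and t.requires_grad is False
        and list(t.shape) == [3072, 3072]
        and list(t.stride()) == [3072, 1]
    )

Swapping the weight for a tensor with a different dtype, device or layout would fail this check and, analogously, invalidate the compiled graph.
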
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
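By this point the to_q guards have described a complete peft LoRA-wrapped Linear: a frozen base_layer with a [3072, 3072] weight and [3072] bias, trainable lora_A ([16, 3072]) and lora_B ([3072, 16]) projections, scaling['default_0'] == 1.0, use_dora off, and a single active adapter 'default_0'. Below is a sketch of the forward path these guards cite (peft/tuners/lora/layer.py:497-509), reconstructed from the quoted source lines; the identity dropout and bias-free LoRA projections are assumptions consistent with the guards, not code copied from peft.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    def __init__(self, in_features: int = 3072, out_features: int = 3072,
                 r: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, bias=True)  # frozen [3072, 3072] weight + [3072] bias
        self.base_layer.requires_grad_(False)
        self.lora_A = nn.Linear(in_features, r, bias=False)                # weight [16, 3072], requires_grad=True
        self.lora_B = nn.Linear(r, out_features, bias=False)               # weight [3072, 16], requires_grad=True
        self.dropout = nn.Identity()                                       # stand-in for lora_dropout['default_0']
        self.scaling = scaling                                             # scaling['default_0'] == 1.0

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # result = self.base_layer(x, ...); x = x.to(lora_A.weight.dtype)
        # result = result + lora_B(lora_A(dropout(x))) * scaling
        result = self.base_layer(x)
        x = x.to(self.lora_A.weight.dtype)
        return result + self.lora_B(self.lora_A(self.dropout(x))) * self.scaling
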
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 
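The guards just above also specialize on mutable adapter state: _active_adapter == ['default_0'], use_dora checked via ID_MATCH, merged_adapters empty, and _disable_adapters checked via ID_MATCH. Activating another adapter, merging weights, toggling DoRA, or disabling adapters at run time would therefore fail these guards and force a recompile. A small sketch, assuming a recent PyTorch 2.x build, of how to produce this same guard/recompile logging on a toy function (the function and shapes below are placeholders, not the FLUX transformer from this dump):

import torch

# Roughly equivalent to running with TORCH_LOGS="guards,recompiles".
torch._logging.set_logs(guards=True, recompiles=True)

@torch.compile
def project(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
    return torch.nn.functional.linear(x, w)

w = torch.randn(8, 8)
project(torch.randn(4, 8), w)   # first call compiles and dumps a guard tree like the one in this log
project(torch.randn(16, 8), w)  # a changed input shape may log a recompile together with the failing guard
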
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
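The recurring DICT_CONTAINS guards of the form "not ___dict_contains('forward', ...__dict__)" check that no per-instance forward override has been installed on a submodule, since Dynamo inlined the class-level forward when it traced the graph. A tiny illustration of the Python behaviour being guarded, using a generic nn.Linear rather than the actual FLUX submodule:

import torch.nn as nn

m = nn.Linear(4, 4)
print('forward' in m.__dict__)   # False: forward is resolved on the class
m.forward = lambda x: x          # a per-instance override lands in the instance __dict__
print('forward' in m.__dict__)   # True: a DICT_CONTAINS guard like the ones above would now fail
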
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].processor, 139846066293824) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=15 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[15] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[15] == '15' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules) == 6 # 
result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in 
merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # 
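All of the proj_out guards above (merged_adapters empty, _disable_adapters is False, _active_adapter == ['default_0'], scaling['default_0'] == 1.0, use_dora['default_0'] is False, plus the TENSOR_MATCH checks on the frozen base_layer and the trainable lora_A/lora_B weights) come from one pass through the PEFT LoRA Linear forward whose source lines the guard comments quote (peft/tuners/lora/layer.py:488-509). The sketch below paraphrases that path using only those quoted lines; it is not verbatim peft code and omits the DoRA and merged-weight branches.

```python
# Hedged paraphrase of the guarded code path, reconstructed from the quoted lines.
def lora_linear_forward(self, x, *args, **kwargs):
    if self.disable_adapters:                         # ID_MATCH on _disable_adapters (False here)
        return self.base_layer(x, *args, **kwargs)
    result = self.base_layer(x, *args, **kwargs)      # frozen bf16 Linear, TENSOR_MATCH on weight/bias
    for active_adapter in self.active_adapters:       # _active_adapter == ['default_0']
        if active_adapter not in self.lora_A.keys():
            continue
        lora_A = self.lora_A[active_adapter]          # rank-16 down-projection, requires_grad=True
        lora_B = self.lora_B[active_adapter]          # rank-16 up-projection, bias is None (ID_MATCH)
        dropout = self.lora_dropout[active_adapter]
        scaling = self.scaling[active_adapter]        # EQUALS_MATCH: scaling['default_0'] == 1.0
        x = x.to(lora_A.weight.dtype)
        if not self.use_dora[active_adapter]:         # ID_MATCH: use_dora['default_0'] is False
            result = result + lora_B(lora_A(dropout(x))) * scaling
    return result
```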
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
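The norm_q / norm_k guards pin eps == 1e-06 and a single [128]-element weight (one scale per attention head dimension), and the quoted lines (diffusers normalization.py:428-430) show the usual RMS normalization: multiply by rsqrt of the last-dim variance, then apply the optional learned scale. A minimal sketch under those assumptions follows; it is not the diffusers RMSNorm class, and the float32 upcast for the variance plus the final cast back to the weight dtype are assumptions, only the quoted lines are taken from the log.

```python
import torch
import torch.nn as nn

class ToyRMSNorm(nn.Module):
    # Minimal RMS norm consistent with the guarded norm_q/norm_k modules.
    def __init__(self, dim=128, eps=1e-6):
        super().__init__()
        self.eps = eps                                  # EQUALS_MATCH: eps == 1e-06
        self.weight = nn.Parameter(torch.ones(dim))     # TENSOR_MATCH: size=[128]

    def forward(self, hidden_states):
        variance = hidden_states.float().pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
        if self.weight is not None:                     # quoted at normalization.py:430
            hidden_states = hidden_states * self.weight
        return hidden_states.to(self.weight.dtype)
```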
L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
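Across proj_out and to_q the same shape pattern repeats: the frozen base Linear keeps its original [out, in] bf16 weight, lora_A is [16, in], lora_B is [out, 16], both trainable and bias-free, and scaling['default_0'] == 1.0. Under the default LoRA scaling convention (scaling = lora_alpha / r) that is consistent with a rank-16 adapter with lora_alpha equal to the rank. The peft LoraConfig sketch below would produce adapters of this shape; the target_modules list, the dropout value, and the exact alpha are assumptions not read from the log.

```python
from peft import LoraConfig

# Assumed config; only r=16, bias-free adapter layers, and scaling == 1.0 are implied by the guards.
lora_config = LoraConfig(
    r=16,
    lora_alpha=16,          # scaling = lora_alpha / r = 1.0, matching the EQUALS_MATCH above
    lora_dropout=0.0,       # assumption; the log only shows a lora_dropout submodule exists
    bias="none",            # lora_A/lora_B biases are guarded as None via ID_MATCH
    target_modules=["to_q", "to_k", "to_v", "proj_mlp", "proj_out"],  # assumed list
)
```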
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora) == 
1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor 
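
(The guards in this stretch all specialize the PEFT LoRA Linear forward path quoted in the trailing source comments, peft/tuners/lora/layer.py:488-509, as applied to the to_q / to_k / to_v projections of single_transformer_blocks.15.attn. Below is a minimal Python sketch of that path, reconstructed only from those quoted lines: the names lora_A, lora_B, lora_dropout, scaling, use_dora, the 'default_0' adapter key, r=16, and the frozen bf16 3072-wide base Linear mirror the guard sources above, while the class name LoraLinearSketch, its constructor signature, and the nn.Identity dropout are assumptions, not the actual peft implementation.)

import torch
import torch.nn as nn


class LoraLinearSketch(nn.Module):
    """Hedged sketch of the LoRA Linear forward referenced by the guards above
    (peft/tuners/lora/layer.py:488-509); not the actual peft code."""

    def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        # Guard sources above show a frozen bf16 Linear(3072, 3072) base layer with bias.
        self.base_layer = base_layer
        # lora_A weight is [r, in_features], lora_B weight is [out_features, r]; the bias
        # guards are consistent with bias=None, so bias=False here is an assumption.
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # assumed: dropout disabled
        self.scaling = {adapter: 1.0}       # guard: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}    # guard: use_dora['default_0'] is False
        self.active_adapters = [adapter]    # guard: _active_adapter == ['default_0']
        self.disable_adapters = False       # guard on _disable_adapters
        self.merged_adapters = []           # guard: merged_adapters is empty

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.disable_adapters:                               # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                             # layer.py:497
        for active_adapter in self.active_adapters:             # layer.py:499
            if active_adapter not in self.lora_A.keys():        # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                # layer.py:503
            dropout = self.lora_dropout[active_adapter]         # layer.py:504
            scaling = self.scaling[active_adapter]              # layer.py:505
            x = x.to(lora_A.weight.dtype)                       # layer.py:506
            if not self.use_dora[active_adapter]:               # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result


# Usage with shapes matching the TENSOR_MATCH guards above: a 3072-wide bf16 projection
# with a frozen base weight and trainable r=16 adapters.
base = nn.Linear(3072, 3072).to(torch.bfloat16).requires_grad_(False)
to_q = LoraLinearSketch(base).to(torch.bfloat16)
out = to_q(torch.randn(2, 3072, dtype=torch.bfloat16))  # -> [2, 3072]

(Each attribute this forward touches corresponds to one of the guard entries above: TYPE_MATCH on the ModuleDicts, EQUALS_MATCH on scaling['default_0'] == 1.0 and the adapter name, TENSOR_MATCH on the [16, 3072] / [3072, 16] adapter weights and the [3072, 3072] base weight. Changing any of them between calls, for example swapping adapters or altering the LoRA scale, fails the corresponding guard and triggers recompilation.)
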
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].processor, 139846066294592) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['15']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=16 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[16] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[16] == '16' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
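The TENSOR_MATCH entries above (check_tensor on the proj_mlp base weight and bias) guard only static tensor properties: class, dispatch keys, dtype, device, shape, stride and requires_grad, never the values. A plain-Python restatement of what the weight guard pins, as a hedged illustration rather than Dynamo's actual guard code:

```python
import torch

# Hedged restatement of the check_tensor guard on proj_mlp.base_layer.weight shown above:
# it pins the tensor's static metadata, not its contents. Not Dynamo's implementation.
def proj_mlp_weight_still_matches(t: torch.Tensor) -> bool:
    return (
        isinstance(t, torch.nn.Parameter)
        and t.dtype == torch.bfloat16
        and t.device == torch.device("cuda", 0)   # device=0 in the guard
        and tuple(t.shape) == (12288, 3072)
        and t.stride() == (3072, 1)
        and t.requires_grad is False
    )
```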
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) 
== 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: 
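The proj_mlp guards above trace PEFT's LoRA-wrapped Linear along the source lines quoted in the log (peft/tuners/lora/layer.py:488-509): a 3072 to 12288 base layer plus a rank-16 adapter (lora_A weight [16, 3072], lora_B weight [12288, 16], both without bias), with scaling['default_0'] == 1.0 and use_dora False. A minimal sketch of that forward path, as an illustrative re-implementation rather than PEFT's class:

```python
import torch
import torch.nn as nn

# Illustrative sketch of the LoRA Linear forward traced above; shapes follow the
# guarded proj_mlp. nn.Identity stands in for the configured lora_dropout.
class LoraLinearSketch(nn.Module):
    def __init__(self, in_features=3072, out_features=12288, r=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)   # weight [12288, 3072], bias [12288]
        self.lora_A = nn.Linear(in_features, r, bias=False)      # weight [16, 3072]
        self.lora_B = nn.Linear(r, out_features, bias=False)     # weight [12288, 16]
        self.lora_dropout = nn.Identity()
        self.scaling = scaling                                   # guarded: scaling['default_0'] == 1.0

    def forward(self, x):
        result = self.base_layer(x)                              # layer.py:497
        x = x.to(self.lora_A.weight.dtype)                       # layer.py:506
        return result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling  # layer.py:509

print(LoraLinearSketch()(torch.randn(2, 8, 3072)).shape)  # torch.Size([2, 8, 12288])
```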
# peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
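The act_mlp guard above pins approximate == 'tanh', i.e. the tanh-approximated GELU that the quoted activation.py:734 dispatches to. A short sketch:

```python
import torch
import torch.nn as nn

# act_mlp is guarded as GELU with approximate == 'tanh':
#   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
act_mlp = nn.GELU(approximate="tanh")
x = torch.randn(4)
print(torch.allclose(act_mlp(x), torch.nn.functional.gelu(x, approximate="tanh")))  # True
```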
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, 
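The proj_out base weight guarded above is [3072, 15360], i.e. its input width equals 3072 + 12288. That is consistent with the quoted transformer_flux.py:98 call projecting the concatenation of the attention output and the MLP branch back to the model width; a hedged, shape-only sketch (not the diffusers implementation):

```python
import torch
import torch.nn as nn

# Shape-only sketch: proj_out maps dim + mlp_hidden_dim = 3072 + 12288 = 15360 back to 3072,
# and the result is gated as in the quoted line `gate * self.proj_out(hidden_states)`.
# The concatenation itself is an assumption based on these widths, not shown in this log.
dim, mlp_hidden_dim, seq = 3072, 12288, 8
proj_out = nn.Linear(dim + mlp_hidden_dim, dim)        # weight [3072, 15360], bias [3072]

attn_output = torch.randn(1, seq, dim)
mlp_hidden_states = torch.randn(1, seq, mlp_hidden_dim)
gate = torch.randn(1, 1, dim)

hidden_states = gate * proj_out(torch.cat([attn_output, mlp_hidden_states], dim=2))
print(hidden_states.shape)  # torch.Size([1, 8, 3072])
```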
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
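The bookkeeping guards above freeze the PEFT adapter state the graph was traced under: _active_adapter == ['default_0'], merged_adapters empty, and _disable_adapters False. Changing any of these plain Python attributes (merging the LoRA, switching or disabling adapters) fails a guard and forces Dynamo to recompile the frame. A hedged toy showing the mechanism with a hypothetical flag attribute, not PEFT code:

```python
import torch

# Toy module: `flag` is a hypothetical stand-in for a guarded Python attribute such as
# disable_adapters. Dynamo specializes the trace on its value, so flipping it after the
# first compiled call fails the installed guard and triggers a recompile.
class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.flag = False
        self.lin = torch.nn.Linear(8, 8)

    def forward(self, x):
        if self.flag:            # traced as a guard on the attribute's value
            return x
        return self.lin(x)

toy = Toy()
compiled = torch.compile(toy, backend="eager")  # "eager" backend keeps the demo lightweight
x = torch.randn(2, 8)
compiled(x)       # first call: traces and installs guards for flag == False
toy.flag = True   # guarded attribute changes ...
compiled(x)       # ... so this call recompiles and takes the other branch
```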
L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
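
For reference, the control flow that this whole family of guards specializes on is the PEFT LoRA Linear forward quoted in the inline comments (peft/tuners/lora/layer.py:488-509). Below is a minimal, illustrative sketch of that path, not the actual peft implementation; the adapter key 'default_0', rank 16, and scaling 1.0 are read off the guard values above, and the dropout module is assumed to be an identity because no dropout probability is guarded.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        """Illustrative stand-in for the guarded peft LoRA Linear wrappers (to_q/to_k/to_v, ...)."""

        def __init__(self, base_layer: nn.Linear, r: int = 16, scaling: float = 1.0):
            super().__init__()
            self.base_layer = base_layer                   # frozen bf16 projection, e.g. 3072x3072 for to_k
            self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # assumed: dropout disabled
            self.scaling = {"default_0": scaling}          # guarded: scaling['default_0'] == 1.0
            self.use_dora = {"default_0": False}           # guarded: use_dora['default_0'] is False
            self.active_adapters = ["default_0"]           # guarded: _active_adapter == ['default_0']
            self.disable_adapters = False                  # guarded: _disable_adapters is False
            self.merged = False                            # guarded: merged_adapters is empty

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            result = self.base_layer(x)                        # layer.py:497
            if self.disable_adapters or self.merged:           # simplified from layer.py:488 and merged()
                return result
            for active_adapter in self.active_adapters:        # layer.py:499
                if active_adapter not in self.lora_A.keys():   # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]           # layer.py:503
                dropout = self.lora_dropout[active_adapter]    # layer.py:504
                scaling = self.scaling[active_adapter]         # layer.py:505
                x = x.to(lora_A.weight.dtype)                  # layer.py:506
                if not self.use_dora[active_adapter]:          # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result

    # e.g. a stand-in for single_transformer_blocks.16.attn.to_k:
    to_k = LoraLinearSketch(nn.Linear(3072, 3072).to(torch.bfloat16))
    out = to_k(torch.randn(1, 64, 3072, dtype=torch.bfloat16))
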
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
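
The TENSOR_MATCH / check_tensor entries above and below are the guards that pin each parameter's metadata. check_tensor itself is internal to Dynamo, but the properties it compares are visible in the log; a hypothetical helper that performs the equivalent eager-mode check might look like this (matches_guard and its arguments are illustrative names, not a PyTorch API):

    import torch

    def matches_guard(t, *, dtype, device, requires_grad, size, stride) -> bool:
        """Hypothetical re-statement of what a TENSOR_MATCH guard pins for a parameter."""
        return (
            isinstance(t, torch.nn.Parameter)
            and t.dtype == dtype
            and t.device == torch.device(device)
            and t.requires_grad == requires_grad
            and tuple(t.size()) == tuple(size)
            and tuple(t.stride()) == tuple(stride)
        )

    # Mirrors the guard on to_v.base_layer.weight below (bf16, [3072, 3072], frozen),
    # except on CPU so the snippet runs anywhere; the logged guard pins device=0 (cuda:0).
    w = torch.nn.Parameter(torch.empty(3072, 3072, dtype=torch.bfloat16), requires_grad=False)
    print(matches_guard(w, dtype=torch.bfloat16, device="cpu",
                        requires_grad=False, size=(3072, 3072), stride=(3072, 1)))

If any of these pinned properties changes between calls (a parameter cast to another dtype, moved to another device, or resized), the guard fails and Dynamo recompiles the frame.
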
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 
# return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 
'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].processor, 139846066090624) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
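
The dump for block '17' that follows repeats the same pattern as block '16'. Trees like this are emitted by the __guards logging artifact shown in each record's prefix; to reproduce (or silence) them for your own torch.compile runs, recent PyTorch releases expose the same artifacts through TORCH_LOGS / torch._logging, roughly as sketched below (exact artifact names can vary by release):

    import torch
    import torch._logging

    # Same effect as running with TORCH_LOGS="guards,recompiles" in the environment.
    torch._logging.set_logs(guards=True, recompiles=True)

    @torch.compile
    def f(x):
        return x * 2

    f(torch.randn(4))   # first call compiles and prints a guard tree like the one in this log
    f(torch.randn(8))   # a shape change fails a guard and triggers a recompile
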
[__guards] | | | | | | +- KeyValueManager pair at index=17 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[17] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[17] == '17' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
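
Block '17''s norm guards above (the three-submodule dict with a silu and a LoRA-wrapped linear whose base weight is [9216, 3072], plus the quoted lines emb = self.linear(self.silu(emb)) and norm_hidden_states, gate = self.norm(hidden_states, emb=temb)) describe an adaptive layer norm conditioned on temb. A minimal sketch of a module with that shape follows, assuming the 9216-wide (3 x 3072) output is chunked into shift / scale / gate; the class name, chunk order, eps, and affine settings are guesses, not the diffusers implementation.

    import torch
    import torch.nn as nn

    class AdaNormSketch(nn.Module):
        """Illustrative adaptive LayerNorm with the same interface as the guarded norm."""

        def __init__(self, dim: int = 3072):
            super().__init__()
            self.silu = nn.SiLU()                                     # guarded: silu.inplace is False
            self.linear = nn.Linear(dim, 3 * dim)                     # guarded base_layer weight: [9216, 3072]
            self.norm = nn.LayerNorm(dim, elementwise_affine=False)   # assumed settings

        def forward(self, hidden_states: torch.Tensor, emb: torch.Tensor):
            emb = self.linear(self.silu(emb))           # normalization.py:169 in the guard comments
            shift, scale, gate = emb.chunk(3, dim=-1)   # assumed split of the 3 * dim output
            hidden_states = self.norm(hidden_states) * (1 + scale[:, None]) + shift[:, None]
            return hidden_states, gate                  # matches "norm_hidden_states, gate = self.norm(...)"

    x, temb = torch.randn(1, 64, 3072), torch.randn(1, 3072)
    norm_hidden_states, gate = AdaNormSketch()(x, emb=temb)
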
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # 
result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 
in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].processor, 139846066091392) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # 
diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=18 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[18] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[18] == '18' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = 
self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
+- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].processor, 139846066092160) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
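Note: the guard entries above for attn.to_k and attn.to_v specialize on every Python-level attribute the PEFT LoRA linear touches on its forward path: EQUALS_MATCH on the 'default_0' adapter key and the 1.0 scaling, TENSOR_MATCH on the bf16 lora_A [16, 3072] and lora_B [3072, 16] weights (requires_grad=True) and on the frozen [3072, 3072] base weight, ID_MATCH on the lora biases and the use_dora / _disable_adapters flags, plus DICT_LENGTH / DICT_CONTAINS checks on the module containers. The sketch below is only a reading aid: it reconstructs that forward path from the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509 and nn/modules/linear.py:125); the class name, the nn.ModuleDict / nn.Identity containers and the bias=False choice are assumptions, not PEFT's actual code.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Hypothetical stand-in for the LoRA-wrapped Linear behind attn.to_k / attn.to_v.
    Shapes, dtypes and flags mirror the guards above; container types are assumed."""
    def __init__(self, features=3072, rank=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(features, features, dtype=torch.bfloat16)  # [3072, 3072], frozen
        self.base_layer.requires_grad_(False)
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(features, rank, bias=False, dtype=torch.bfloat16)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(rank, features, bias=False, dtype=torch.bfloat16)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
        self.scaling = {"default_0": scaling}   # EQUALS_MATCH == 1.0 (layer.py:505)
        self.use_dora = {"default_0": False}    # layer.py:508
        self.active_adapters = ["default_0"]    # EQUALS_MATCH == 'default_0' (layer.py:499)
        self.disable_adapters = False           # layer.py:488
        self.merged_adapters = []               # LENGTH_CHECK: empty (tuners_utils.py:455)

    def forward(self, x):
        if self.disable_adapters:               # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)             # layer.py:497, F.linear at nn/modules/linear.py:125
        for name in self.active_adapters:       # layer.py:499
            if name not in self.lora_A.keys():  # layer.py:500
                continue
            lora_A = self.lora_A[name]
            lora_B = self.lora_B[name]          # layer.py:503
            dropout = self.lora_dropout[name]   # layer.py:504
            scaling = self.scaling[name]        # layer.py:505
            x = x.to(lora_A.weight.dtype)       # layer.py:506
            if not self.use_dora[name]:         # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

# Usage, mirroring key = attn.to_k(hidden_states) / value = attn.to_v(hidden_states)
# (attention_processor.py:1717-1718 above); the sequence length here is arbitrary:
# out = LoraLinearSketch()(torch.randn(1, 4096, 3072, dtype=torch.bfloat16))

Every branch and container lookup on this path becomes its own guard, which is why a single LoRA-wrapped projection contributes dozens of entries and the full dump across all blocks runs to thousands of lines.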
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=19 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[19] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[19] == '19' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules) == 6 # 
result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
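The ID_MATCH guards in this stretch (the lora_A/lora_B bias entries here, plus the use_dora['default_0'] and _disable_adapters entries further down) pin an attribute to the exact object identity recorded at trace time; for LoRA layers created with bias=False that object is most likely the None singleton, and for the boolean flags the False singleton. A minimal sketch of what such an identity guard amounts to; the helper below is an illustrative stand-in, not Dynamo's own ___check_obj_id, and the recorded ids are process-specific:

def check_obj_id(obj, expected_id):
    # Identity comparison: the guard holds only if the very same object is seen again.
    return id(obj) == expected_id

lora_bias = None                  # LoRA A/B layers are typically built with bias=False
expected_bias_id = id(None)       # stands in for the 7580768 recorded above
use_dora_flag = False             # stands in for use_dora['default_0']
expected_flag_id = id(False)      # stands in for 7629920 recorded below

assert check_obj_id(lora_bias, expected_bias_id)
assert check_obj_id(use_dora_flag, expected_flag_id)

Because the comparison is by identity rather than equality, rebinding such an attribute to a different but equal object would still fail the guard and force a recompile.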
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
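Taken together, the guards above on base_layer, lora_dropout, lora_A['default_0'], lora_B['default_0'] and, just below, use_dora and _active_adapter follow the LoRA code path quoted in the comments from peft/tuners/lora/layer.py. A condensed sketch of that path for a single active adapter, using the shapes guarded for this norm.linear module (3072 to 9216 base projection, rank-16 factors); treating lora_dropout as an identity and scaling as 1.0 are assumptions matching the sibling proj_mlp/proj_out guards, not values guarded for this exact module:

import torch
import torch.nn as nn

in_features, out_features, rank = 3072, 9216, 16   # from the TENSOR_MATCH guards above

base_layer = nn.Linear(in_features, out_features, bias=True).to(torch.bfloat16)
lora_A = nn.Linear(in_features, rank, bias=False).to(torch.bfloat16)   # weight [16, 3072], bias None
lora_B = nn.Linear(rank, out_features, bias=False).to(torch.bfloat16)  # weight [9216, 16], bias None
dropout = nn.Identity()   # assumption: lora_dropout['default_0'] acts as a pass-through at p=0.0
scaling = 1.0             # assumption: self.scaling['default_0'] as shown for proj_mlp/proj_out

def lora_linear_forward(x):
    # Mirrors the quoted lines: result = self.base_layer(x, ...); x = x.to(lora_A.weight.dtype);
    # result = result + lora_B(lora_A(dropout(x))) * scaling
    result = base_layer(x)
    x = x.to(lora_A.weight.dtype)
    return result + lora_B(lora_A(dropout(x))) * scaling

out = lora_linear_forward(torch.randn(2, in_features, dtype=torch.bfloat16))
print(out.shape)   # torch.Size([2, 9216])

Guarding this whole attribute chain is what keeps the compiled graph valid only for this adapter configuration; adding, renaming, or merging an adapter changes the guarded keys, lengths, and tensors and forces a new compile.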
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in 
merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
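The guards just above describe the adaptive-norm step quoted from diffusers/src/diffusers/models/normalization.py: the 3072-to-9216 linear is applied to silu(emb), and the hidden states pass through an affine-free LayerNorm (weight and bias are None, eps 1e-06, normalized_shape (3072,)) before being scaled and shifted. A compact sketch under those assumptions; the three-way chunk into shift/scale/gate is inferred from the 9216 output width and the gate returned at transformer_flux.py:88, it is not quoted in this log:

import torch
import torch.nn.functional as F

dim = 3072
hidden_states = torch.randn(2, 512, dim)
temb = torch.randn(2, dim)
linear = torch.nn.Linear(dim, 3 * dim)          # the LoRA-wrapped 3072 -> 9216 projection above

emb = linear(F.silu(temb))                      # emb = self.linear(self.silu(emb))
shift_msa, scale_msa, gate = emb.chunk(3, dim=1)

# Affine-free LayerNorm: weight/bias are None in the guards, eps matches the 1e-06 above.
norm_x = F.layer_norm(hidden_states, (dim,), weight=None, bias=None, eps=1e-6)
x = norm_x * (1 + scale_msa[:, None]) + shift_msa[:, None]
print(x.shape, gate.shape)                      # torch.Size([2, 512, 3072]) torch.Size([2, 3072])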
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].use_dora) == 
1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor 
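The guard entries above for to_q and to_k (and the analogous ones that follow for to_v) all specialize the single-adapter PEFT LoRA path cited in the source comments (peft/tuners/lora/layer.py:488-509) as it is invoked from the diffusers attention processor (query/key/value = attn.to_q/to_k/to_v(hidden_states), attention_processor.py:1716-1718). As a minimal sketch, assuming only the values these guards pin (one active adapter 'default_0', rank-16 lora_A/lora_B weights in bfloat16, scaling == 1.0, DoRA and adapter-disabling both off, no hooks registered), the specialized computation per projection is roughly the following; the helper name and signature are illustrative only, not PEFT's API:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    def lora_linear_sketch(x, base_weight, base_bias,
                           lora_A_weight, lora_B_weight,
                           scaling=1.0, dropout=nn.Identity()):
        # result = self.base_layer(x, *args, **kwargs)              # peft/tuners/lora/layer.py:497
        result = F.linear(x, base_weight, base_bias)                # frozen base: [3072, 3072] bf16
        # x = x.to(lora_A.weight.dtype)                             # peft/tuners/lora/layer.py:506
        x = x.to(lora_A_weight.dtype)
        # result = result + lora_B(lora_A(dropout(x))) * scaling    # peft/tuners/lora/layer.py:509
        low_rank = F.linear(F.linear(dropout(x), lora_A_weight),    # lora_A: [16, 3072]
                            lora_B_weight)                          # lora_B: [3072, 16]
        return result + low_rank * scaling

Every property listed in these records is a guard on the compiled graph: the module type ids, the 'default_0' adapter key, scaling == 1.0, the empty hook dicts, and the TENSOR_MATCH dtype/shape/stride/requires_grad checks on the base and LoRA parameters. Changing any of them at runtime (for example loading a second adapter, merging the LoRA weights into the base layer, or casting to a different dtype) fails the guard check and triggers recompilation.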
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].processor, 139846066092928) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['19']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=20 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[20] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[20] == '20' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) 
== 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: 
# peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 
# return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 
'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].processor, 139846066093696) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | +- KeyValueManager pair at index=21 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[21] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[21] == '21' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # 
result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 
in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].processor, 139846065365440) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # 
diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=22 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[22] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[22] == '22' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
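The guard entries above for single_transformer_blocks.21.attn.to_v all come from Dynamo inlining the PEFT LoRA forward quoted in the guard comments (peft/tuners/lora/layer.py:488-509): every attribute that forward touches (base_layer, the lora_A / lora_B / lora_dropout sub-modules, scaling, use_dora, _active_adapter, merged_adapters, and the hook dicts) gets its own guard, and the same block of guards is repeated for each adapted layer (block '22' begins below with an identical structure). As a rough, self-contained sketch, and assuming only a single LoRA-wrapped 3072x3072 linear on one CUDA device rather than the full Flux pipeline that produced this log, the snippet below yields a similar TREE_GUARD_MANAGER dump when run with TORCH_LOGS="guards"; note the adapter name will be PEFT's default "default" instead of the "default_0" seen above, and the LoRA weight dtype may differ from the base layer's bfloat16.

    # Hypothetical minimal repro (not the author's pipeline). Run as:
    #   TORCH_LOGS="guards" python repro_guards.py
    import torch
    import torch.nn as nn
    from peft import LoraConfig, inject_adapter_in_model

    class ToV(nn.Module):
        def __init__(self):
            super().__init__()
            # Same shape/dtype as the guarded to_v projection: 3072 -> 3072, bfloat16.
            self.to_v = nn.Linear(3072, 3072, bias=True, dtype=torch.bfloat16)

        def forward(self, x):
            return self.to_v(x)

    base = ToV().cuda()  # assumes a CUDA device, matching device=0 in the log
    # r=16 reproduces the [16, 3072] lora_A and [3072, 16] lora_B weight shapes above.
    cfg = LoraConfig(r=16, lora_alpha=16, target_modules=["to_v"], lora_dropout=0.0)
    model = inject_adapter_in_model(cfg, base)

    compiled = torch.compile(model)
    x = torch.randn(1, 512, 3072, device="cuda", dtype=torch.bfloat16)
    with torch.no_grad():
        compiled(x)  # first call compiles; with TORCH_LOGS="guards" the guard tree is printed

Because each adapted Linear contributes its own copy of these dictionary and tensor guards, the guard tree grows roughly linearly with the number of LoRA-wrapped modules. Merging the adapter weights into the base layers before compiling (for example PEFT's merge_and_unload, or fuse_lora on diffusers pipelines) is a commonly used way to shrink that guard surface, though that is a workflow choice and not something reflected in this log.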
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
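The guard chain above for single_transformer_blocks['22'].proj_mlp follows the peft LoRA Linear.forward path quoted in its source comments (peft/tuners/lora/layer.py:488-509). Below is a minimal sketch of that path for orientation only, not the peft implementation; shapes and flags are taken from the guards (base_layer 3072 -> 12288 with bias, lora_A 3072 -> 16 and lora_B 16 -> 12288 with bias None, scaling['default_0'] == 1.0, DoRA disabled, no merged adapters), and the dropout module is treated as a no-op since the guards only check its type.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Hedged paraphrase of the peft Linear.forward path quoted in the guards above
    # (peft/tuners/lora/layer.py:488-509); shapes follow the TENSOR_MATCH guards for
    # single_transformer_blocks['22'].proj_mlp. Not the actual peft implementation.
    def __init__(self, in_features=3072, out_features=12288, r=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, bias=True)  # weight [12288, 3072], bias [12288]
        self.lora_A = nn.Linear(in_features, r, bias=False)                # weight [16, 3072], bias is None (ID_MATCH)
        self.lora_B = nn.Linear(r, out_features, bias=False)               # weight [12288, 16], bias is None (ID_MATCH)
        self.lora_dropout = nn.Identity()   # guards only check the dropout module's type; assumed no-op here
        self.scaling = scaling              # scaling['default_0'] == 1.0 (EQUALS_MATCH)
        self.disable_adapters = False       # _disable_adapters ID_MATCH
        self.merged_adapters = []           # LENGTH_CHECK: empty, so the adapter branch is taken

    def forward(self, x):
        if self.disable_adapters or self.merged_adapters:
            return self.base_layer(x)
        result = self.base_layer(x)                       # result = self.base_layer(x, ...)   (layer.py:497)
        x = x.to(self.lora_A.weight.dtype)                # x = x.to(lora_A.weight.dtype)      (layer.py:506)
        # result = result + lora_B(lora_A(dropout(x))) * scaling                               (layer.py:509)
        return result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling

# Example: 3072-wide hidden states go in, 12288-wide MLP activations come out.
# y = LoraLinearSketch()(torch.randn(1, 4096, 3072))   # -> [1, 4096, 12288]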
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = 
self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
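Taken together, the norm, proj_mlp, act_mlp and proj_out guards for this block trace the data path quoted in their source comments (transformer_flux.py:88-98 and normalization.py:169-171). The sketch below follows the shapes the guards report: 3072-wide hidden states, a 12288-wide MLP branch, GELU with approximate='tanh', LayerNorm with eps 1e-06 and no affine parameters, and a proj_out whose [3072, 15360] weight suggests (an inference, not something the guards state) that the attention output and the MLP output are concatenated before projection.

import torch
import torch.nn as nn

# Hedged sketch of the single-block MLP/gating path the guard comments quote; module
# names mirror the quoted source, the concat is assumed from proj_out's weight shape.
dim, mlp_dim = 3072, 12288
norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)   # weight/bias ID_MATCH to None
proj_mlp = nn.Linear(dim, mlp_dim)                             # weight [12288, 3072]
act_mlp = nn.GELU(approximate="tanh")                          # approximate == 'tanh' (EQUALS_MATCH)
proj_out = nn.Linear(dim + mlp_dim, dim)                       # weight [3072, 15360], bias [3072]

def single_block_mlp_path(hidden_states, attn_output, shift_msa, scale_msa, gate):
    # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]     (normalization.py:171)
    norm_hidden_states = norm(hidden_states) * (1 + scale_msa[:, None]) + shift_msa[:, None]
    # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))  (transformer_flux.py:89)
    mlp_hidden_states = act_mlp(proj_mlp(norm_hidden_states))
    # hidden_states = gate * self.proj_out(hidden_states)                  (transformer_flux.py:98)
    hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)     # assumed concat -> width 15360
    return gate[:, None] * proj_out(hidden_states)

# h = torch.randn(1, 4096, dim); a = torch.randn(1, 4096, dim)
# out = single_block_mlp_path(h, a, torch.zeros(1, dim), torch.zeros(1, dim), torch.ones(1, dim))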
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
+- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
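For orientation: the norm_q / norm_k guards above pin an RMSNorm-style layer on the query and key projections (EQUALS_MATCH on eps == 1e-06, TENSOR_MATCH on a single [128] bfloat16 weight, i.e. head_dim = 3072 // 24). The sketch below reconstructs only what those guard comments quote from diffusers (attention_processor.py:1716-1730 and normalization.py:428-430); the variance computation and the helper names are assumptions, not the diffusers source.

import torch

def rms_norm(x, weight, eps=1e-6):
    # eps and the [head_dim] weight are the attributes guarded above;
    # the float32 variance math is an assumed, standard RMSNorm detail.
    variance = x.float().pow(2).mean(-1, keepdim=True)
    x = x * torch.rsqrt(variance + eps)
    return x.to(weight.dtype) * weight if weight is not None else x

def attention_prologue(attn, hidden_states):
    # Mirrors the source comments quoted in the guards above.
    query = attn.to_q(hidden_states)      # attention_processor.py:1716 (LoRA-wrapped Linear)
    key = attn.to_k(hidden_states)        # attention_processor.py:1717
    # head_dim = inner_dim // attn.heads  # attention_processor.py:1721, attn.heads guarded == 24
    if attn.norm_q is not None:           # attention_processor.py:1727
        query = attn.norm_q(query)        # applies the RMSNorm sketched above
    if attn.norm_k is not None:           # attention_processor.py:1729
        key = attn.norm_k(key)
    return query, key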
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 
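Most of the remaining guards on to_q above (and the matching to_k guards that follow) pin exactly the state read by the PEFT LoRA forward: disable_adapters is False, merged_adapters is empty, the single active adapter is 'default_0', use_dora['default_0'] is False, scaling['default_0'] == 1.0, and the rank-16 lora_A / lora_B weights match in dtype, shape and stride. The sketch below reconstructs that control flow purely from the source comments quoted in the guards (peft/tuners/lora/layer.py:488-509); it is an illustration of the guarded path, not the peft implementation.

def lora_linear_forward(layer, x):
    # Guarded fast path: adapters enabled, nothing merged, DoRA off.
    if layer.disable_adapters:                         # layer.py:488, guarded False
        return layer.base_layer(x)
    result = layer.base_layer(x)                       # layer.py:497 -> F.linear(x, weight, bias)
    for active_adapter in layer.active_adapters:       # layer.py:499, guarded ['default_0']
        if active_adapter not in layer.lora_A.keys():  # layer.py:500
            continue
        lora_A = layer.lora_A[active_adapter]          # weight guarded [16, 3072], bfloat16
        lora_B = layer.lora_B[active_adapter]          # layer.py:503, weight guarded [3072, 16]
        dropout = layer.lora_dropout[active_adapter]   # layer.py:504, type pinned above
        scaling = layer.scaling[active_adapter]        # layer.py:505, guarded == 1.0
        x = x.to(lora_A.weight.dtype)                  # layer.py:506
        if not layer.use_dora[active_adapter]:         # layer.py:508, guarded False
            result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
    return result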
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].processor, 139846065366208) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=23 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[23] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[23] == '23' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules) == 6 # 
result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in 
merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].use_dora) == 
1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].processor, 139846065366976) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['23']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=24 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[24] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[24] == '24' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) 
== 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: 
# peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 
# return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 
'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].processor, 139846065367744) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | +- KeyValueManager pair at index=25 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[25] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[25] == '25' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # 
result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 
in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].processor, 139846065368512) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # 
diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=26 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[26] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[26] == '26' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
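The guard records up to this point cover the PEFT LoRA wrapper around a single projection (base_layer weight/bias TENSOR_MATCH, lora_A/lora_B rank-16 weights, lora_dropout, scaling == 1.0, use_dora and the active-adapter list), keyed to peft/tuners/lora/layer.py and nn/modules/linear.py. A minimal sketch along the following lines should reproduce a guard tree of this shape outside the full diffusers Flux pipeline. It assumes PyTorch 2.x and the peft package; the ToyAttnProj module, the 3072-wide sizes, the CPU float32 tensors, and the eager backend are illustrative assumptions, not the original run (which compiled the Flux transformer's single_transformer_blocks with bfloat16 CUDA parameters).

import torch
import torch.nn as nn
from peft import LoraConfig, inject_adapter_in_model

class ToyAttnProj(nn.Module):
    def __init__(self):
        super().__init__()
        # Stands in for the guarded `to_v` projection; width is illustrative.
        self.to_v = nn.Linear(3072, 3072, bias=True)

    def forward(self, x):
        return self.to_v(x)

model = ToyAttnProj()
# r=16 / lora_alpha=16 gives rank-16 adapters and scaling == 1.0, matching the
# EQUALS_MATCH on scaling seen in this trace; adapter naming may differ here.
cfg = LoraConfig(r=16, lora_alpha=16, lora_dropout=0.0, target_modules=["to_v"])
model = inject_adapter_in_model(cfg, model)

# Emit dynamo's guard artifact (the [__guards] records shown in this log).
torch._logging.set_logs(guards=True)

# backend="eager" still installs guards but skips inductor codegen, so the
# sketch runs without a GPU or C++ toolchain.
compiled = torch.compile(model, backend="eager")
compiled(torch.randn(1, 64, 3072))

With guards logging enabled, each compiled frame prints a guard tree like the one in this trace; the TENSOR_MATCH entries on the lora_A/lora_B weights and the EQUALS_MATCH entries on the scaling value and active adapter name are what force a recompilation if the adapter rank, dtype, device, or adapter name changes between calls.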
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = 
self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
+- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].processor, 139846065164544) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=27 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[27] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[27] == '27' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules) == 6 # 
result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
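The TENSOR_MATCH guards above pin every static property of the guarded parameters: Python class, dispatch key set, dtype, device index, requires_grad, size and stride. As an illustrative restatement only (the real check is the C++ check_tensor call shown in the log; the helper name below is made up), the guard on the 9216x3072 base_layer weight asserts roughly:

    import torch

    def base_layer_weight_ok(t: torch.Tensor) -> bool:
        # Approximate pure-Python reading of the TENSOR_MATCH guard quoted above;
        # the dispatch-key and NO_TENSOR_ALIASING checks are omitted here.
        return (
            isinstance(t, torch.nn.Parameter)
            and t.dtype == torch.bfloat16
            and t.device == torch.device("cuda", 0)   # "device=0" in the log
            and t.requires_grad is False
            and tuple(t.shape) == (9216, 3072)
            and tuple(t.stride()) == (3072, 1)
        )
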
L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
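Taken together, the guards above walk the PEFT LoRA forward path quoted in the source comments (peft/tuners/lora/layer.py:488-509): check disable_adapters, run the base layer, then for the single active adapter 'default_0' apply dropout, lora_A, lora_B and the scaling of 1.0, with use_dora guarded False. A plain-PyTorch paraphrase of the adapted 3072 -> 9216 linear these guards describe, with shapes and flags taken from the TENSOR_MATCH/EQUALS_MATCH entries (a readability sketch, not PEFT's actual class):

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # base 3072 -> 9216, rank-16 adapter, scaling['default_0'] == 1.0 per the guards
        def __init__(self, in_features=3072, out_features=9216, rank=16, scaling=1.0):
            super().__init__()
            self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
            self.lora_A = nn.Linear(in_features, rank, bias=False, dtype=torch.bfloat16)
            self.lora_B = nn.Linear(rank, out_features, bias=False, dtype=torch.bfloat16)
            self.dropout = nn.Identity()   # stand-in for lora_dropout['default_0'] in this sketch
            self.scaling = scaling

        def forward(self, x):
            result = self.base_layer(x)
            x = x.to(self.lora_A.weight.dtype)        # mirrors layer.py:506 quoted in the log
            # use_dora['default_0'] is guarded False, so only the plain LoRA branch runs:
            return result + self.lora_B(self.lora_A(self.dropout(x))) * self.scaling
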
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in 
merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
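The norm-side guards above describe a parameter-free LayerNorm (weight and bias guarded as absent, eps == 1e-06, normalized_shape == (3072,)) modulated by shift/scale/gate values computed from the conditioning embedding, per the diffusers lines quoted in the log (emb = self.linear(self.silu(emb)); x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]). A self-contained sketch of that pattern, with the class name and the 3-way chunk chosen for illustration rather than copied from diffusers:

    import torch
    import torch.nn as nn

    class AdaNormSketch(nn.Module):
        def __init__(self, dim=3072):
            super().__init__()
            self.silu = nn.SiLU()
            self.linear = nn.Linear(dim, 3 * dim)   # consistent with the guarded 3072 -> 9216 projection
            # weight and bias are guarded as absent, i.e. no elementwise affine parameters:
            self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)

        def forward(self, x, emb):
            emb = self.linear(self.silu(emb))
            shift_msa, scale_msa, gate = emb.chunk(3, dim=1)
            x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
            return x, gate
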
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].use_dora) == 1 # if not 
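For scale, the proj_mlp shapes pinned by the guards above (base_layer weight 12288x3072 with a 12288 bias; lora_A 16x3072 and lora_B 12288x16, both apparently bias-free given their ID_MATCH bias guards) imply a rank-16 adapter amounting to well under 1% of the base projection's parameters:

    # Back-of-the-envelope count from the guarded shapes above.
    base_params = 12288 * 3072 + 12288        # base_layer weight + bias
    lora_params = 16 * 3072 + 12288 * 16      # lora_A + lora_B weights (no biases)
    print(base_params, lora_params, f"{lora_params / base_params:.2%}")
    # -> 37761024 245760 0.65%
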
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].use_dora) == 
1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor 
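The guard entries above for to_q and to_k (and the to_v entries that follow) keep citing the same handful of source lines: peft/tuners/lora/layer.py:488-509, nn/modules/linear.py:125 and diffusers' attention_processor.py:1716-1718. The sketch below reconstructs, from those quoted comments and the guard values only, the control flow Dynamo is guarding on for each LoRA-wrapped projection. It is an illustrative approximation under stated assumptions, not the actual PEFT Linear layer, and the class name LoraLinearSketch is made up for this sketch.

```python
# Minimal sketch of the LoRA-wrapped linear forward that these guards trace,
# reconstructed from the source-line comments quoted in the guard tree
# (peft/tuners/lora/layer.py:488-509, nn/modules/linear.py:125).
# Illustrative only; not the real PEFT implementation.
import torch
import torch.nn as nn


class LoraLinearSketch(nn.Module):
    def __init__(self, in_features=3072, out_features=3072, r=16,
                 adapter="default_0", dtype=torch.bfloat16):
        super().__init__()
        # Frozen base projection (requires_grad=False in the TENSOR_MATCH guards).
        self.base_layer = nn.Linear(in_features, out_features, dtype=dtype)
        self.base_layer.requires_grad_(False)
        # Per-adapter LoRA pieces keyed by adapter name ('default_0' in this log).
        # bias=False leaves _parameters['bias'] as None, matching the ID_MATCH guards.
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False, dtype=dtype)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False, dtype=dtype)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # dropout p=0.0 case
        self.scaling = {adapter: 1.0}         # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}      # ID_MATCH on use_dora['default_0']
        self._active_adapter = [adapter]      # LENGTH_CHECK / EQUALS_MATCH on _active_adapter
        self.merged_adapters = []             # LENGTH_CHECK: not merged_adapters
        self._disable_adapters = False        # ID_MATCH on _disable_adapters

    def forward(self, x):
        # Branch structure the guards pin down: adapters enabled, nothing merged,
        # a single active adapter, DoRA disabled.
        result = self.base_layer(x)
        for active_adapter in self._active_adapter:
            if active_adapter not in self.lora_A.keys():
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:
                result = result + lora_B(lora_A(dropout(x))) * scaling
        return result
```

Every attribute read on that path surfaces as a guard: the TENSOR_MATCH entries pin dtype (torch.bfloat16), device (cuda:0), requires_grad, size and stride of the base weight [3072, 3072] and bias [3072], the lora_A weight [16, 3072] and the lora_B weight [3072, 16]; EQUALS_MATCH pins the adapter key 'default_0' and scaling == 1.0; ID_MATCH and LENGTH_CHECK pin the empty/False state of use_dora, merged_adapters, _disable_adapters and the hook dicts. If any of these change at runtime, the [0/1] graph's guards fail and torch.compile recompiles.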
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].processor, 139846065165312) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['27']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=28 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[28] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[28] == '28' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) 
== 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: 
# peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
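The attn guards in this stretch record heads == 24 and a single [128] bfloat16 weight for norm_q (and, just below, norm_k) with eps == 1e-06, i.e. head_dim = inner_dim // heads = 3072 // 24 = 128, and the cited diffusers line normalizes via x * torch.rsqrt(variance + eps). A small illustrative sketch of that RMS-style query/key normalization under those numbers follows; the function name and tensor layout are assumptions for illustration, not the diffusers RMSNorm class itself.

```python
# Illustrative RMS-style query/key normalization matching the guarded numbers:
# head_dim = 3072 // 24 = 128, eps = 1e-6, one learnable [128] weight, and the
# core step x * torch.rsqrt(variance + eps) cited by the EQUALS_MATCH guard.
import torch

def rms_norm_qk(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # x: [batch, heads, seq, head_dim]; variance is the mean square over head_dim
    variance = x.float().pow(2).mean(dim=-1, keepdim=True)
    x = x.float() * torch.rsqrt(variance + eps)
    return (x * weight.float()).to(weight.dtype)

heads = 24                      # EQUALS_MATCH: attn.heads == 24
head_dim = 3072 // heads        # 128, the size of the guarded norm_q weight
weight = torch.ones(head_dim, dtype=torch.bfloat16)
q = torch.randn(1, heads, 16, head_dim, dtype=torch.bfloat16)
print(rms_norm_qk(q, weight).shape)   # torch.Size([1, 24, 16, 128])
```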
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 
# return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 
'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].processor, 139846065166080) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | +- KeyValueManager pair at index=29 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[29] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[29] == '29' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # 
result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # 
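[annotation, not part of the log] Every guard in this stretch traces PEFT's LoRA Linear.forward, the peft/tuners/lora/layer.py:488-509 frames cited above: base layer call, active-adapter lookup, scaling, the use_dora branch, and the final low-rank update. Below is a minimal sketch of that path under the state the guards pin down (one adapter named 'default_0', use_dora False, nothing merged, adapters enabled). The class name, the bias=False LoRA projections and the nn.Identity dropout are my simplifying assumptions, not PEFT's exact implementation.

```python
import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Minimal sketch of the forward path guarded above (single adapter, no DoRA, not merged)."""

    def __init__(self, base_layer: nn.Linear, r: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base_layer  # frozen bf16 Linear, pinned by TENSOR_MATCH on weight/bias
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # dropout p=0 assumed for this run
        self.scaling = {"default_0": scaling}   # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}    # ID_MATCH guard at layer.py:508
        self.active_adapters = ["default_0"]    # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self.disable_adapters = False           # ID_MATCH guard at layer.py:488
        self.merged_adapters = []               # LENGTH_CHECK: not merged_adapters

    def forward(self, x):
        result = self.base_layer(x)                               # layer.py:497
        for active_adapter in self.active_adapters:               # layer.py:499
            if active_adapter not in self.lora_A.keys():          # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                  # layer.py:503
            dropout = self.lora_dropout[active_adapter]           # layer.py:504
            scaling = self.scaling[active_adapter]                # layer.py:505
            x = x.to(lora_A.weight.dtype)                         # layer.py:506
            if not self.use_dora[active_adapter]:                 # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result
```

Because every attribute and dict lookup on that path is observed during tracing, each one becomes its own guard, which is why a single LoRA-wrapped Linear contributes dozens of lines to this dump.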
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
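[annotation, not part of the log] The TENSOR_MATCH guards fix the layer shapes of this single transformer block, so the data path can be read back from them. A short bookkeeping sketch follows, assuming the usual FluxSingleTransformerBlock layout (dim 3072, mlp_ratio 4); the concatenation feeding proj_out is not visible in this excerpt and is an inference from its 15360-wide input.

```python
# Annotation: shape arithmetic implied by the guards above; every number appears in a
# TENSOR_MATCH or EQUALS_MATCH line, the cat() step is my inference.
dim = 3072
mlp_hidden_dim = 4 * dim              # 12288 -> proj_mlp out_features (lora_B weight [12288, 16])
proj_out_in = dim + mlp_hidden_dim    # 15360 -> proj_out base_layer weight [3072, 15360]

# Path traced by the cited transformer_flux.py frames:
#   mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))  # line 89, GELU(approximate="tanh")
#   attn_output       = self.attn(...)                                   # line 91
#   hidden_states     = gate * self.proj_out(hidden_states)              # line 98, hidden_states assumed to be
#                                                                        # cat([attn_output, mlp_hidden_states], dim=2)
```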
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].use_dora) == 1 # if not 
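[annotation, not part of the log] A TENSOR_MATCH guard pins a parameter's class, dispatch key set, dtype, device, requires_grad, size and stride. Re-stated as a hypothetical sanity check for the proj_out lora_B weight guarded just above; the transformer handle and the attribute path are my assumptions, not taken from the log.

```python
import torch

def check_proj_out_lora_B(transformer) -> None:
    """Spell out the TENSOR_MATCH conditions as asserts. `transformer` is a hypothetical
    handle to the compiled FluxTransformer2DModel with this LoRA loaded."""
    w = transformer.single_transformer_blocks[29].proj_out.lora_B["default_0"].weight
    assert isinstance(w, torch.nn.Parameter)
    assert w.dtype == torch.bfloat16 and w.device.type == "cuda"
    assert tuple(w.shape) == (3072, 16) and tuple(w.stride()) == (16, 1)
    assert w.requires_grad
    # If any of these change between calls (for example a LoRA of a different rank is
    # loaded in place), this guard fails and Dynamo retraces and recompiles the frame.
```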
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
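[annotation, not part of the log] The ID_MATCH guards with small constant ids recur throughout the dump: 7580768 on every LoRA bias and 7629920 on every use_dora['default_0'] and _disable_adapters. Reading those as identity checks against the interpreter's singletons, i.e. bias is None and the flags are False, is my interpretation, not something the log states; the sketch only spells that reading out.

```python
# Annotation (interpretation): ___check_obj_id compares id(obj) against a recorded id,
# so guarding a value like None or False reduces to an `is`-style identity check.
recorded_bias = None        # what the 'bias' ID_MATCH most likely pins (id 7580768 in this process)
recorded_flag = False       # what the use_dora / _disable_adapters ID_MATCH most likely pins (id 7629920)

def check_obj_id(obj, recorded_id: int) -> bool:
    """Sketch of the guard primitive: identity, not equality."""
    return id(obj) == recorded_id

assert check_obj_id(None, id(recorded_bias))
assert check_obj_id(False, id(recorded_flag))
```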
L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
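[annotation, not part of the log] The norm_q / norm_k guards pin eps == 1e-06 and a weight of size [128], consistent with the cited head_dim = inner_dim // attn.heads frame: 3072 // 24 = 128, so queries and keys are RMS-normalized per head. A minimal sketch in the spirit of the cited diffusers/src/diffusers/models/normalization.py:428-430 lines; it is a simplification, not the exact diffusers class.

```python
import torch

def rms_norm_sketch(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    """RMS norm over the last dim (head_dim = 3072 // 24 = 128 here), scaled by the guarded [128] weight."""
    variance = hidden_states.float().pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)   # normalization.py:428
    return (hidden_states * weight.float()).to(weight.dtype)      # applied because self.weight is not None

# e.g. a query of shape [batch, heads, seq, head_dim] is normalized per head before attention.
```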
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 
in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].processor, 139846065166848) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # 
diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=30 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[30] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[30] == '30' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
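The guard comments in the records above keep pointing at the same few source lines: peft/tuners/lora/layer.py:488-509 (the LoRA Linear forward) and nn/modules/linear.py:125 (F.linear in the frozen base layer). For orientation, here is a minimal Python sketch of that dispatch path, reconstructed only from the lines quoted in the guard comments; it is not the actual PEFT implementation (which also handles disabled adapters, merged adapters, DoRA, and mixed-batch paths), and the class and argument names below are illustrative.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Sketch of the forward path these guards protect, following the source
    # lines quoted above (peft/tuners/lora/layer.py:497-509). Shapes mirror the
    # to_v guards: base weight [3072, 3072] in bf16 with requires_grad=False,
    # lora_A [16, 3072], lora_B [3072, 16], scaling == 1.0, one adapter 'default_0'.
    def __init__(self, base_layer: nn.Linear, r: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base_layer  # frozen base projection (requires_grad=False in the TENSOR_MATCH guards)
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # PEFT uses Identity when lora_dropout == 0
        self.scaling = {"default_0": scaling}        # guarded with EQUALS_MATCH == 1.0
        self.use_dora = {"default_0": False}         # guarded with ID_MATCH (the 'if not use_dora' branch was traced)
        self._active_adapter = ["default_0"]         # guarded with EQUALS_MATCH == 'default_0'

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = self.base_layer(x)                      # layer.py:497
        for active_adapter in self._active_adapter:      # layer.py:499
            lora_A = self.lora_A[active_adapter]         # layer.py:500
            lora_B = self.lora_B[active_adapter]         # layer.py:503
            dropout = self.lora_dropout[active_adapter]  # layer.py:504
            scaling = self.scaling[active_adapter]       # layer.py:505
            x = x.to(lora_A.weight.dtype)                # layer.py:506
            if not self.use_dora[active_adapter]:        # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Because Dynamo specializes the compiled graph on every Python attribute the trace touches, each of these fields (module-dict lengths, adapter keys, hook dicts, parameter dtype/shape/stride, the use_dora flags) becomes its own TYPE_MATCH / DICT_LENGTH / EQUALS_MATCH / TENSOR_MATCH entry, and the same sub-tree repeats for every LoRA-wrapped projection in every transformer block, which is why this guard dump runs to thousands of lines. A dump in this form typically comes from enabling the guards logging artifact, e.g. TORCH_LOGS="guards" or torch._logging.set_logs(guards=True); that is an assumption about how this particular log was captured, not something stated in it.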
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = 
self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
+- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].processor, 139846065167616) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=31 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[31] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[31] == '31' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules) == 6 # 
result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in 
merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].use_dora) == 
1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor 
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].processor, 139846064955456) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['31']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=32 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[32] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[32] == '32' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 
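
The norm subtree guarded just above traces an adaptive layer norm: emb = self.linear(self.silu(emb)) (normalization.py:169) followed by x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] (normalization.py:171), with the inner LayerNorm pinned to eps == 1e-06 and normalized_shape == (3072,). A rough sketch under those guards follows; the 3 * dim projection (shift/scale/gate) is inferred from the guarded lora_B output width of 9216 = 3 x 3072, elementwise_affine=False is an assumption consistent with the ID_MATCH guards on the norm's weight and bias, and the class name is illustrative.

import torch
import torch.nn as nn

class MiniAdaNormSingle(nn.Module):
    # Illustrative reconstruction of the modulated norm traced by the guards above
    # (diffusers normalization.py:169/171); not the actual diffusers class.
    def __init__(self, dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(dim, 3 * dim)                              # inferred: shift, scale, gate
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # eps guard; affine-off is an assumption

    def forward(self, x, emb):
        emb = self.linear(self.silu(emb))                                  # normalization.py:169
        shift_msa, scale_msa, gate = emb.chunk(3, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]   # normalization.py:171
        return x, gate                                                     # transformer_flux.py:88 unpacks (norm_hidden_states, gate)
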
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
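
Each TENSOR_MATCH above calls check_tensor with the parameter's class, dispatch keys, dtype, device, requires_grad flag, size and stride. As a rough approximation only (tensor_guard_ok is a hypothetical helper, and the real guard also covers dispatch keys, tensor subclass and the NO_TENSOR_ALIASING check), the recompile condition it encodes looks roughly like:

import torch

def tensor_guard_ok(t, *, dtype, device, requires_grad, size, stride):
    # Hypothetical helper: approximates what a TENSOR_MATCH guard pins for a parameter.
    return (t.dtype == dtype
            and t.device == torch.device(device)
            and t.requires_grad == requires_grad
            and tuple(t.shape) == tuple(size)
            and t.stride() == tuple(stride))

# Example mirroring the proj_mlp base weight guard above (checked on CPU here for portability;
# the logged guard pins device=0, i.e. cuda:0, and requires_grad=False for the frozen base weight).
w = torch.nn.Parameter(torch.zeros(12288, 3072, dtype=torch.bfloat16), requires_grad=False)
print(tensor_guard_ok(w, dtype=torch.bfloat16, device="cpu", requires_grad=False,
                      size=(12288, 3072), stride=(3072, 1)))  # True
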
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) 
== 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: 
# peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
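The to_q/to_k guard subtrees above (and the to_v subtree that continues below) all walk the same PEFT LoRA-wrapped Linear. The following is a minimal sketch of that forward path, reconstructed only from the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509) and the constants the guards pin (a single rank-16 adapter named 'default_0', scaling 1.0, use_dora False, empty merged_adapters); the class name and attribute layout are simplified assumptions, not the actual PEFT implementation.

import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Simplified stand-in for the PEFT LoRA Linear that the guards above trace."""

    def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base_layer                                 # frozen [3072, 3072] linear in the log
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # per-adapter dropout module
        self.scaling = {adapter: 1.0}          # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}       # ID_MATCH on use_dora['default_0'] (layer.py:508)
        self.disable_adapters = False          # ID_MATCH on _disable_adapters (layer.py:488)
        self.active_adapters = [adapter]       # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self.merged_adapters = []              # LENGTH_CHECK: merged_adapters is empty

    def forward(self, x, *args, **kwargs):
        if self.disable_adapters:                             # layer.py:488
            return self.base_layer(x, *args, **kwargs)
        result = self.base_layer(x, *args, **kwargs)          # layer.py:497
        for active_adapter in self.active_adapters:           # layer.py:499
            if active_adapter not in self.lora_A.keys():      # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]              # layer.py:503
            dropout = self.lora_dropout[active_adapter]       # layer.py:504
            scaling = self.scaling[active_adapter]            # layer.py:505
            x = x.to(lora_A.weight.dtype)                     # layer.py:506
            if not self.use_dora[active_adapter]:             # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
        return result

# Hypothetical usage matching the TENSOR_MATCH sizes in the dump:
base = nn.Linear(3072, 3072, bias=True)
to_k = LoraLinearSketch(base)   # rank 16 -> lora_A weight [16, 3072], lora_B weight [3072, 16]

Every attribute read in this sketch (the adapter keys, scaling, use_dora, the base weight and bias, the lora_A/lora_B weights) appears as its own guard in the dump, which is why each LoRA-wrapped projection contributes dozens of guards per transformer block.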
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 
# return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 
'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
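The to_q, to_k, and to_v guard subtrees (the last of which ends above) are all reached from the same few attention-processor lines quoted in the guard comments (diffusers attention_processor.py:1713 and :1716-1718). Below is a small sketch of just that call path; the surrounding processor and Attention machinery is omitted, and the helper name is made up for illustration.

def project_qkv(attn, hidden_states, encoder_hidden_states=None):
    # attention_processor.py:1713, as quoted in the guards above
    batch_size, _, _ = (
        hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
    )
    # attention_processor.py:1716-1718: each projection is the LoRA-wrapped Linear
    # sketched earlier, so to_q / to_k / to_v each get their own guard subtree.
    query = attn.to_q(hidden_states)
    key = attn.to_k(hidden_states)
    value = attn.to_v(hidden_states)
    return batch_size, query, key, value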
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].processor, 139846064956224) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
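Block '32' of single_transformer_blocks is now fully guarded; the entries that follow repeat the identical pattern for key '33' and onward. The loop that produces one such subtree per block is the one quoted in the guard comments (transformer_flux.py:509 and :531); roughly as sketched here, with the block's remaining call arguments omitted as an assumption.

import torch.nn as nn

def run_single_transformer_blocks(single_transformer_blocks: nn.ModuleList, hidden_states, temb):
    # transformer_flux.py:509: enumerate() over the ModuleList is why Dynamo keys
    # each guard subtree by its string index ('32', '33', ...).
    for index_block, block in enumerate(single_transformer_blocks):
        # transformer_flux.py:531: each call re-enters the norm/attn/proj guards above;
        # the real call passes additional kwargs, elided in this sketch.
        hidden_states = block(hidden_states, temb=temb)
    return hidden_states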
[__guards] | | | | | | +- KeyValueManager pair at index=33 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[33] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[33] == '33' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # 
result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 
in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
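Each TENSOR_MATCH entry in this stretch pins the parameter class, dispatch key set, dtype, device index, requires_grad flag, size and stride of a specific LoRA or base weight, so swapping in an adapter with a different rank, dtype or device (or merging it, which makes the merged_adapters LENGTH_CHECK fail) invalidates these guards and recompiles the frame. A rough Python equivalent of what one such guard asserts for the rank-16 lora_A weight, as an illustration only (the real check lives inside Dynamo's guard machinery, not this hypothetical helper):

    import torch

    def tensor_matches(p: torch.Tensor) -> bool:
        # Approximates the TENSOR_MATCH on ...['to_k']._modules['lora_A']._modules['default_0']._parameters['weight']
        return (
            isinstance(p, torch.nn.Parameter)
            and p.dtype == torch.bfloat16
            and p.device == torch.device("cuda", 0)
            and p.requires_grad
            and tuple(p.shape) == (16, 3072)
            and p.stride() == (3072, 1)
        )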
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].processor, 139846064956992) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # 
diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=34 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[34] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[34] == '34' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
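For reference, every LoRA guard in this stretch of the dump points at the unmerged-adapter path quoted in the comments (peft/tuners/lora/layer.py:497-509 in this trace). The following is a minimal sketch of that path, not the actual peft implementation: shapes, dtype and scaling are taken from the TENSOR_MATCH / EQUALS_MATCH entries for single_transformer_blocks.34.norm.linear (base 3072 -> 9216, rank 16, torch.bfloat16, scaling 1.0), and the variable names and the Identity stand-in for lora_dropout['default_0'] are illustrative assumptions.

    import torch
    import torch.nn as nn

    in_features, out_features, rank, scaling = 3072, 9216, 16, 1.0
    dtype = torch.bfloat16

    base_layer = nn.Linear(in_features, out_features, bias=True, dtype=dtype)   # weight [9216, 3072], bias [9216]
    base_layer.requires_grad_(False)                                             # dump shows the base weights frozen (requires_grad=False)
    lora_A = nn.Linear(in_features, rank, bias=False, dtype=dtype)               # weight [16, 3072], requires_grad=True in the dump
    lora_B = nn.Linear(rank, out_features, bias=False, dtype=dtype)              # weight [9216, 16]
    dropout = nn.Identity()                                                      # stand-in for lora_dropout['default_0'] (assumption)

    x = torch.randn(2, in_features, dtype=dtype)
    result = base_layer(x)                                   # layer.py:497  result = self.base_layer(x, *args, **kwargs)
    x = x.to(lora_A.weight.dtype)                            # layer.py:506
    result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509, taken because use_dora['default_0'] is False
    print(result.shape)                                      # torch.Size([2, 9216])

Because the adapter is left unmerged, Dynamo guards on every attribute this path touches (base weight and bias, lora_A/lora_B weights, the dropout module, scaling, use_dora, _active_adapter, merged_adapters, _disable_adapters), which is why the same guard pattern repeats for each LoRA-wrapped Linear in the model. On recent PyTorch releases a dump like this can be reproduced by running with TORCH_LOGS="guards" (or torch._logging.set_logs(guards=True)).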
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = 
self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
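The guard block above pins down the PEFT LoRA wrapper around proj_out: the active adapter name ('default_0'), the disabled/merged flags, the dropout module, and the rank-16 lora_A/lora_B linears, with the guarded source locations pointing at peft/tuners/lora/layer.py:488-509. Below is a minimal sketch of that forward path, reconstructed only from the source lines quoted in the guard comments; the class and argument names are illustrative, and the real PEFT layer also handles merging, DoRA and mixed-batch inference that this sketch omits.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        """Illustrative stand-in for the LoRA-wrapped Linear traced by the guards above."""

        def __init__(self, base: nn.Linear, r: int = 16, scaling: float = 1.0,
                     adapter: str = "default_0"):
            super().__init__()
            self.base_layer = base
            self.active_adapters = [adapter]      # guarded: _active_adapter == ['default_0']
            self.disable_adapters = False         # guarded via ID_MATCH on _disable_adapters
            self.merged_adapters = []             # guarded empty: nothing merged into base_layer
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base.out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # only the type id is guarded
            self.scaling = {adapter: scaling}     # guarded: scaling['default_0'] == 1.0
            self.use_dora = {adapter: False}      # guarded (ID_MATCH), consistent with False

        def forward(self, x):
            # follows the control flow quoted from peft/tuners/lora/layer.py:488-509
            if self.disable_adapters:
                return self.base_layer(x)
            result = self.base_layer(x)
            for active_adapter in self.active_adapters:
                if active_adapter not in self.lora_A.keys():
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]
                dropout = self.lora_dropout[active_adapter]
                scaling = self.scaling[active_adapter]
                x = x.to(lora_A.weight.dtype)
                if not self.use_dora[active_adapter]:
                    result = result + lora_B(lora_A(dropout(x))) * scaling
            return result

    # proj_out shapes from the guards: lora_A.weight [16, 15360], lora_B.weight [3072, 16]
    layer = LoraLinearSketch(nn.Linear(15360, 3072))
    out = layer(torch.randn(2, 15360))   # base projection plus the rank-16 update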
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # 
diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | 
+- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
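Each TENSOR_MATCH record in this block freezes the metadata the compiled graph was specialized on: the parameter class, its dispatch keys, dtype, device index, requires_grad, size and stride (the companion NO_TENSOR_ALIASING guard additionally requires the guarded tensors to be distinct objects). The real check runs inside Dynamo's guard manager; the snippet below is only a rough Python rendering of what such a guard asserts, using the to_q base-layer weight recorded above (bfloat16, cuda:0, size [3072, 3072], stride [3072, 1]) as the example. It needs a CUDA device to run as written.

    import torch

    def tensor_match(p, *, dtype, device_index, requires_grad, size, stride):
        # approximate, Python-level version of what a TENSOR_MATCH guard pins down
        return (
            isinstance(p, torch.nn.Parameter)
            and p.dtype == dtype
            and p.device.type == "cuda" and p.device.index == device_index
            and p.requires_grad == requires_grad
            and tuple(p.size()) == tuple(size)
            and tuple(p.stride()) == tuple(stride)
        )

    w = torch.nn.Parameter(
        torch.empty(3072, 3072, dtype=torch.bfloat16, device="cuda:0"),
        requires_grad=False,
    )
    assert tensor_match(w, dtype=torch.bfloat16, device_index=0,
                        requires_grad=False, size=(3072, 3072), stride=(3072, 1))
    # A mismatch (e.g. a different dtype or a non-contiguous stride) would instead
    # invalidate the guard and trigger a recompile of this frame.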
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
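Taken together, the to_q adapter guards above describe a rank-16 LoRA on a 3072-wide projection: lora_A.weight is [16, 3072], lora_B.weight is [3072, 16], both bias slots ID_MATCH a fixed singleton (consistent with bias=None), and scaling['default_0'] == 1.0. In PEFT the scaling is lora_alpha / r, so a value of 1.0 is consistent with lora_alpha == r == 16, but only the resulting ratio is recorded here, not alpha itself. A quick shape check of the low-rank update under those assumptions:

    import torch

    r, d = 16, 3072                    # rank and width read off the guards above
    lora_A = torch.zeros(r, d)         # size=[16, 3072], stride=[3072, 1]
    lora_B = torch.zeros(d, r)         # size=[3072, 16], stride=[16, 1]
    scaling = 1.0                      # guarded value; in PEFT this is lora_alpha / r

    delta_W = (lora_B @ lora_A) * scaling
    assert delta_W.shape == (d, d)     # same shape as the frozen 3072x3072 base weight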
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 
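The attn guards in this block also fix the attention geometry of the single transformer block: heads == 24, the to_q/to_k base projections are 3072x3072, and the per-head RMSNorms norm_q/norm_k carry a [128]-element bfloat16 weight with eps == 1e-06, i.e. head_dim = inner_dim // attn.heads = 3072 // 24 = 128 as in the quoted attention_processor.py line. The sketch below reproduces that arithmetic and the normalization step quoted from normalization.py:428-430; it follows only the quoted lines (the real module also handles dtype casting), and the token count is a made-up placeholder.

    import torch

    inner_dim, heads = 3072, 24
    head_dim = inner_dim // heads          # 128, matching the [128] norm_q / norm_k weights
    eps = 1e-6                             # guarded norm_q.eps / norm_k.eps

    def rms_norm(x, weight):
        # mirrors `hidden_states * torch.rsqrt(variance + self.eps)` from the guard comments
        variance = x.pow(2).mean(-1, keepdim=True)
        x = x * torch.rsqrt(variance + eps)
        if weight is not None:
            x = x * weight
        return x

    q = torch.randn(1, heads, 4096, head_dim)   # 4096 tokens is a placeholder, not from the log
    q = rms_norm(q, torch.ones(head_dim))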
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].processor, 139846064957760) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=35 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[35] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[35] == '35' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules) == 6 # 
result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in 
merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: 
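The proj_mlp guards above pin every attribute that PEFT's LoRA linear wrapper reads on the code path the guard comments cite (peft/tuners/lora/layer.py:497-509). As a point of reference, a minimal sketch of that forward path follows; class and attribute names are simplified for illustration and are not copied from PEFT:

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Illustrative stand-in for a LoRA-wrapped Linear of the kind guarded above.
    In this trace proj_mlp wraps a Linear(3072 -> 12288) with rank-16 bfloat16 adapters."""

    def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base_layer
        self.adapter = adapter
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # inactive dropout in this trace
        self.scaling = {adapter: 1.0}       # guard: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}    # guard: use_dora['default_0'] ID_MATCH
        self.merged_adapters = []           # guard: merged_adapters is empty
        self.disable_adapters = False       # guard on _disable_adapters

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = self.base_layer(x)                                  # layer.py:497
        for active_adapter in [self.adapter]:                        # layer.py:499
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]              # layer.py:504
            scaling = self.scaling[active_adapter]                   # layer.py:505
            x = x.to(lora_A.weight.dtype)                            # layer.py:506
            if not self.use_dora[active_adapter]:                    # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Every attribute read in that loop surfaces above as a guard: TYPE_MATCH on the ModuleDicts, EQUALS_MATCH on the 'default_0' key and scaling == 1.0, TENSOR_MATCH on the rank-16 bfloat16 lora_A/lora_B weights, and ID_MATCH on use_dora['default_0'] and the A/B bias objects. Changing any of them at runtime fails the guard and forces a recompile of this graph.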
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
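The act_mlp guard (GELU with approximate == 'tanh') and the proj_out guards that follow correspond to the MLP-and-gating half of the FLUX single transformer block quoted in the guard comments (transformer_flux.py:89 and :98). A rough sketch of that data flow; the concatenation step and the tensor widths are inferred from the guarded weight sizes (proj_mlp: 3072 -> 12288, proj_out: 15360 -> 3072) rather than quoted in this log:

import torch
import torch.nn as nn
import torch.nn.functional as F

def single_block_mlp_path_sketch(
    norm_hidden_states: torch.Tensor,  # (batch, seq, 3072) in this trace
    gate: torch.Tensor,                # modulation gate, broadcast over the sequence dim
    attn_output: torch.Tensor,         # (batch, seq, 3072)
    proj_mlp: nn.Module,               # LoRA-wrapped Linear(3072 -> 12288)
    proj_out: nn.Module,               # LoRA-wrapped Linear(15360 -> 3072)
) -> torch.Tensor:
    # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))   (line 89)
    mlp_hidden_states = F.gelu(proj_mlp(norm_hidden_states), approximate="tanh")
    # The 15360-wide proj_out input guarded above is consistent with the 3072-dim
    # attention output and the 12288-dim MLP branch being concatenated first.
    hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
    # hidden_states = gate * self.proj_out(hidden_states)                    (line 98)
    return gate * proj_out(hidden_states)

The proj_out subtree that follows repeats the same LoRA guard pattern as proj_mlp, just with the 15360 -> 3072 base layer and rank-16 adapters sized [16, 15360] and [3072, 16].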
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
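The attn guards that begin here (heads == 24, a five-entry _modules dict starting with norm_q/norm_k/to_q, and the quoted attention_processor.py:1716-1721 lines) cover the query/key preparation inside the attention processor. A hedged paraphrase of that path; the to_k call and the view/transpose reshape are assumed from the usual diffusers processor pattern and are not quoted in this log:

import torch

def flux_attn_qk_path_sketch(attn, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    """Illustrative paraphrase of the query/key preparation referenced by the guards;
    `attn` is any object exposing to_q, to_k, heads, norm_q, norm_k. Not the actual
    diffusers processor implementation."""
    batch_size = hidden_states.shape[0]

    query = attn.to_q(hidden_states)       # line 1716; to_q is a LoRA-wrapped Linear(3072 -> 3072)
    key = attn.to_k(hidden_states)         # to_k assumed; not quoted in this section of the log

    # heads == 24 in this trace, so head_dim == 3072 // 24 == 128, matching the
    # size-[128] norm_q / norm_k weights guarded in the entries around this point.
    inner_dim = key.shape[-1]
    head_dim = inner_dim // attn.heads      # line 1721

    query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
    key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

    if attn.norm_q is not None:             # line 1727
        query = attn.norm_q(query)          # line 1728
    if attn.norm_k is not None:             # line 1729
        key = attn.norm_k(key)              # line 1730
    return query, key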
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
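The norm_q/norm_k guards around this point describe an RMS-style norm with eps == 1e-06 and a size-[128] bfloat16 weight (head_dim == 3072 // 24 == 128). A compact sketch of the normalization the quoted normalization.py:428-430 lines describe; the float32 variance upcast is an assumption, while the rsqrt and optional-weight steps follow the quoted comments:

import torch
import torch.nn as nn

class RMSNormSketch(nn.Module):
    """Simplified stand-in for the guarded RMS norm; not the diffusers implementation."""

    def __init__(self, dim: int = 128, eps: float = 1e-6, elementwise_affine: bool = True):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim)) if elementwise_affine else None

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        # hidden_states = hidden_states * torch.rsqrt(variance + self.eps)   (line 428)
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
        if self.weight is not None:                                          # line 430
            hidden_states = hidden_states.to(self.weight.dtype) * self.weight
        else:
            hidden_states = hidden_states.to(input_dtype)
        return hidden_states

The TENSOR_MATCH guards on norm_q/norm_k only check the weight's dtype, device, shape and stride, so the same compiled graph is reused as long as those metadata stay fixed, regardless of the weight values.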
L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].use_dora) == 
1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor 
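Note on the guards above: the entries for to_q / to_k / to_v all point at the same few source lines in peft/tuners/lora/layer.py (488-509), the LoRA Linear forward that wraps each attention projection. A minimal sketch of that forward pattern, reconstructed only from the source lines quoted in the guard comments (it is not the exact peft.tuners.lora.layer.Linear implementation, which has more branches such as merged-weight and mixed-batch handling), is:

import torch
import torch.nn as nn

class LoRALinearSketch(nn.Module):
    # Hypothetical, simplified stand-in for peft's LoRA Linear wrapper, built only
    # from the statements quoted in the guard comments (peft/tuners/lora/layer.py:488-509).
    def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base_layer
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}         # guarded above via EQUALS_MATCH == 1.0
        self.use_dora = {adapter: False}      # guarded above via ID_MATCH on False
        self.active_adapters = [adapter]      # guarded above via EQUALS_MATCH == 'default_0'
        self.disable_adapters = False
        self.merged_adapters = []

    def forward(self, x):
        if self.disable_adapters:                              # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                            # layer.py:497
        for active_adapter in self.active_adapters:            # layer.py:499
            if active_adapter not in self.lora_A.keys():       # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]               # layer.py:503
            dropout = self.lora_dropout[active_adapter]        # layer.py:504
            scaling = self.scaling[active_adapter]             # layer.py:505
            x = x.to(lora_A.weight.dtype)                      # layer.py:506
            if not self.use_dora[active_adapter]:              # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Every attribute this sketch touches (scaling['default_0'], use_dora['default_0'], the active-adapter list, the lora_A/lora_B weights and the base_layer parameters) appears in the dump as a TYPE_MATCH, EQUALS_MATCH, ID_MATCH or TENSOR_MATCH guard, which is why a single LoRA-wrapped projection contributes dozens of guards per transformer block.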
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].processor, 139846064958528) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['35']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- KeyValueManager pair at index=36 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[36] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[36] == '36' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
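The block that starts here guards single block 36's norm module: three submodules (silu, linear, norm) feeding the emb = self.linear(self.silu(emb)) call site quoted from diffusers/src/diffusers/models/normalization.py:169, with a LoRA-wrapped 9216x3072 linear, i.e. 3 x 3072 outputs for one shift/scale/gate triple. A rough sketch of that modulation pattern follows; the class structure and exact formula are an assumption based on the quoted call site and the guarded shapes, not something printed in this log.

```python
import torch
import torch.nn as nn

class AdaNormSingleSketch(nn.Module):
    """Hedged reconstruction of the AdaLayerNormZeroSingle-style modulation
    the 'norm' guards walk through. Shapes follow the TENSOR_MATCH entries."""

    def __init__(self, dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(dim, 3 * dim)   # base weight [9216, 3072], bias [9216] in the log
        self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)

    def forward(self, hidden_states, emb):
        emb = self.linear(self.silu(emb))       # "emb = self.linear(self.silu(emb))"
        shift, scale, gate = emb.chunk(3, dim=1)
        hidden_states = self.norm(hidden_states) * (1 + scale[:, None]) + shift[:, None]
        return hidden_states, gate

mod = AdaNormSingleSketch()
h, gate = mod(torch.randn(2, 64, 3072), torch.randn(2, 3072))
```

In the guarded model this linear is itself a peft lora.Linear, so the same lora_A / lora_B / scaling / use_dora guard bundle repeats underneath norm.linear in the entries below.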
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) 
== 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: 
# peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] 
[0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 
# return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 
'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].processor, 139846064959296) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | +- KeyValueManager pair at index=37 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[37] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[37] == '37' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].__dict__) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # 
result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].use_dora) == 1 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 
in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 1 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].use_dora) == 1 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in 
forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:03:29.821048 
1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].processor, 139846064755328) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # 
diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=L['self']._modules['norm_out'], accessed_by=DictGetItemGuardAccessor(norm_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out'], 99394624) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=L['self']._modules['norm_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['norm_out'].__dict__) # hidden_states = self.norm_out(hidden_states, temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:548 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['norm_out']._modules) == 3 # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['silu'], 96881248) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['norm_out']._modules['silu'].__dict__) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['norm_out']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['linear'], 97167728) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['norm_out']._modules['linear'].__dict__) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['norm_out']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['norm_out']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[6144, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['norm_out']._modules['linear']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[6144], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] # diffusers/src/diffusers/models/normalization.py:306 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['norm_out']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] # diffusers/src/diffusers/models/normalization.py:306 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['norm_out']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['norm_out']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: 
source=L['self']._modules['norm_out']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['norm_out']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['norm_out']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['norm_out']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['norm_out']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['norm_out']._parameters # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['norm_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['norm_out']._backward_hooks # hidden_states = self.norm_out(hidden_states, temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:548 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['norm_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['norm_out']._backward_pre_hooks # hidden_states = self.norm_out(hidden_states, temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:548 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=L['self']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['proj_out'], 97167728) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=L['self']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['proj_out'].__dict__) # output = self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:549 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=L['self']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[64, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[64], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | +- GuardManager: source=L['self'].training, accessed_by=GetAttrGuardAccessor(training) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(L['self'].training, 7629920) # if self.training and self.gradient_checkpointing: # diffusers/src/diffusers/models/transformers/transformer_flux.py:472 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=L['self']._parameters, accessed_by=GetAttrGuardAccessor(_parameters) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- DICT_LENGTH: not L['self']._parameters # _parameters = self.__dict__["_parameters"] # nn/modules/module.py:1904 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=L['self']._internal_dict, accessed_by=GetAttrGuardAccessor(_internal_dict) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- TYPE_MATCH: ___check_type_id(L['self']._internal_dict, 221665040) # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'norm_out') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'proj_out') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'pos_embed') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'x_embedder') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'time_text_embed') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'context_embedder') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'transformer_blocks') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'single_transformer_blocks') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['img_ids'], accessed_by=DictGetItemGuardAccessor(img_ids) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- TENSOR_MATCH: check_tensor(L['img_ids'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[4096, 3], stride=[3, 1]) # if img_ids.ndim == 3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:462 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_HASATTR: hasattr(L['img_ids'], '_dynamo_dynamic_indices') == False # if img_ids.ndim == 3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:462 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['txt_ids'], accessed_by=DictGetItemGuardAccessor(txt_ids) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- TENSOR_MATCH: check_tensor(L['txt_ids'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[512, 3], stride=[3, 1]) # if txt_ids.ndim == 3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:456 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_HASATTR: hasattr(L['txt_ids'], '_dynamo_dynamic_indices') == False # if txt_ids.ndim == 3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:456 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['guidance'], accessed_by=DictGetItemGuardAccessor(guidance) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- TENSOR_MATCH: check_tensor(L['guidance'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.float32, device=0, requires_grad=False, size=[1], stride=[1]) # if guidance is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:445 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_HASATTR: hasattr(L['guidance'], '_dynamo_dynamic_indices') == False # if guidance is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:445 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['timestep'], accessed_by=DictGetItemGuardAccessor(timestep) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- TENSOR_MATCH: check_tensor(L['timestep'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[1], stride=[1]) # timestep = timestep.to(hidden_states.dtype) * 1000 # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:444 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_HASATTR: hasattr(L['timestep'], '_dynamo_dynamic_indices') == False # timestep = timestep.to(hidden_states.dtype) * 1000 # diffusers/src/diffusers/models/transformers/transformer_flux.py:444 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['return_dict'], accessed_by=DictGetItemGuardAccessor(return_dict) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- ID_MATCH: ___check_obj_id(L['return_dict'], 7629920) # if not return_dict: # diffusers/src/diffusers/models/transformers/transformer_flux.py:555 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['hidden_states'], accessed_by=DictGetItemGuardAccessor(hidden_states) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- TENSOR_MATCH: check_tensor(L['hidden_states'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[1, 4096, 64], stride=[262144, 64, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_HASATTR: hasattr(L['hidden_states'], '_dynamo_dynamic_indices') == False # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['pooled_projections'], accessed_by=DictGetItemGuardAccessor(pooled_projections) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- TENSOR_MATCH: check_tensor(L['pooled_projections'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[1, 768], stride=[768, 1]) # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_HASATTR: hasattr(L['pooled_projections'], '_dynamo_dynamic_indices') == False # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['encoder_hidden_states'], accessed_by=DictGetItemGuardAccessor(encoder_hidden_states) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- TENSOR_MATCH: check_tensor(L['encoder_hidden_states'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[1, 512, 4096], stride=[2097152, 4096, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_HASATTR: 
hasattr(L['encoder_hidden_states'], '_dynamo_dynamic_indices') == False # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['joint_attention_kwargs'], accessed_by=DictGetItemGuardAccessor(joint_attention_kwargs) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- ID_MATCH: ___check_obj_id(L['joint_attention_kwargs'], 7580768) # if joint_attention_kwargs is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:428 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['controlnet_block_samples'], accessed_by=DictGetItemGuardAccessor(controlnet_block_samples) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- ID_MATCH: ___check_obj_id(L['controlnet_block_samples'], 7580768) # if controlnet_block_samples is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:502 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=L['controlnet_single_block_samples'], accessed_by=DictGetItemGuardAccessor(controlnet_single_block_samples) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- ID_MATCH: ___check_obj_id(L['controlnet_single_block_samples'], 7580768) # if controlnet_single_block_samples is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:538 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | +- GuardManager: source=G, accessed_by=GlobalsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['USE_PEFT_BACKEND'], accessed_by=DictGetItemGuardAccessor(USE_PEFT_BACKEND) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['USE_PEFT_BACKEND'], 7629952) # if USE_PEFT_BACKEND: # diffusers/src/diffusers/models/transformers/transformer_flux.py:434 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['scale_lora_layers'], accessed_by=DictGetItemGuardAccessor(scale_lora_layers) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['scale_lora_layers'].__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['scale_lora_layers'].__code__, 139856000130016) # scale_lora_layers(self, lora_scale) # diffusers/src/diffusers/models/transformers/transformer_flux.py:436 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['unscale_lora_layers'], accessed_by=DictGetItemGuardAccessor(unscale_lora_layers) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['unscale_lora_layers'].__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['unscale_lora_layers'].__code__, 139856000130192) # unscale_lora_layers(self, lora_scale) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:553 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__builtins_dict___2'], accessed_by=DictGetItemGuardAccessor(__builtins_dict___2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['int'], accessed_by=DictGetItemGuardAccessor(int) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['int'], 7592000) # if isinstance(pos, int): # diffusers/src/diffusers/models/embeddings.py:547 in get_1d_rotary_pos_embed V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['len'], accessed_by=DictGetItemGuardAccessor(len) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['len'], 139859477898240) # assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" # diffusers/src/diffusers/models/embeddings.py:54 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['set'], accessed_by=DictGetItemGuardAccessor(set) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['set'], 7574816) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['str'], accessed_by=DictGetItemGuardAccessor(str) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['str'], 7556320) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['bool'], accessed_by=DictGetItemGuardAccessor(bool) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['bool'], 7629504) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['iter'], accessed_by=DictGetItemGuardAccessor(iter) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['iter'], 139859477898160) # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['range'], accessed_by=DictGetItemGuardAccessor(range) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['range'], 7576320) # for i in range(n_axes): # diffusers/src/diffusers/models/embeddings.py:628 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['slice'], 
accessed_by=DictGetItemGuardAccessor(slice) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['slice'], 7571168) # if isinstance(idx, slice): # nn/modules/container.py:331 in __getitem__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['super'], accessed_by=DictGetItemGuardAccessor(super) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['super'], 7562400) # return super().__getattr__(name) # diffusers/src/diffusers/models/modeling_utils.py:151 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['hasattr'], accessed_by=DictGetItemGuardAccessor(hasattr) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['hasattr'], 139859477897600) # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['enumerate'], accessed_by=DictGetItemGuardAccessor(enumerate) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['enumerate'], 7452256) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__builtins_dict___2']['isinstance'], accessed_by=DictGetItemGuardAccessor(isinstance) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___2']['isinstance'], 139859477898000) # if isinstance(pos, int): # diffusers/src/diffusers/models/embeddings.py:547 in get_1d_rotary_pos_embed V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_peft_dot_tuners_dot_tuners_utils'], accessed_by=DictGetItemGuardAccessor(__import_peft_dot_tuners_dot_tuners_utils) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_peft_dot_tuners_dot_tuners_utils'], 139846567629904) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_peft_dot_tuners_dot_tuners_utils'].BaseTunerLayer, accessed_by=GetAttrGuardAccessor(BaseTunerLayer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_peft_dot_tuners_dot_tuners_utils'].BaseTunerLayer, 244527504) # from peft.tuners.tuners_utils import BaseTunerLayer # diffusers/src/diffusers/utils/peft_utils.py:113 in scale_lora_layers V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_attention) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] 
[__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention'], 139846585526608) # if len(args) > 0 or kwargs.get("scale", None) is not None: # diffusers/src/diffusers/models/attention.py:1162 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_embeddings) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'], 139846586042224) # t_emb = get_timestep_embedding( # diffusers/src/diffusers/models/embeddings.py:696 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].math, accessed_by=GetAttrGuardAccessor(math) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].math, 139859475307632) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].math.log, accessed_by=GetAttrGuardAccessor(log) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].math.log, 139859474302592) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch, accessed_by=GetAttrGuardAccessor(torch) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch, 139859475513072) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['torch'] # if encoder_hidden_states.dtype == torch.float16: # diffusers/src/diffusers/models/transformers/transformer_flux.py:200 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['__import_diffusers_dot_models_dot_normalization'].torch # if self.weight.dtype in [torch.float16, torch.bfloat16]: # diffusers/src/diffusers/models/normalization.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['__import_diffusers_dot_models_dot_attention_processor'].torch # key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) # diffusers/src/diffusers/models/attention_processor.py:1756 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.cat, accessed_by=GetAttrGuardAccessor(cat) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.cat, 139859471507920) # emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # diffusers/src/diffusers/models/embeddings.py:69 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.cos, accessed_by=GetAttrGuardAccessor(cos) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.cos, 139859471509120) # emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # diffusers/src/diffusers/models/embeddings.py:69 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.exp, accessed_by=GetAttrGuardAccessor(exp) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.exp, 139859471510480) # emb = torch.exp(exponent) # diffusers/src/diffusers/models/embeddings.py:62 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.sin, accessed_by=GetAttrGuardAccessor(sin) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.sin, 139856572546480) # emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # diffusers/src/diffusers/models/embeddings.py:69 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.chunk, accessed_by=GetAttrGuardAccessor(chunk) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.chunk, 139859471508080) # scale, shift = torch.chunk(emb, 2, dim=1) # diffusers/src/diffusers/models/normalization.py:305 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.outer, accessed_by=GetAttrGuardAccessor(outer) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.outer, 139856572571056) # freqs = torch.outer(t, freqs) # type: ignore # [S, D/2] # diffusers/src/diffusers/models/embeddings.py:552 in get_1d_rotary_pos_embed V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.rsqrt, accessed_by=GetAttrGuardAccessor(rsqrt) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.rsqrt, 139859471462208) # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:03:29.821048 1882310 
torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.stack, accessed_by=GetAttrGuardAccessor(stack) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.stack, 139859471467760) # x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3) # diffusers/src/diffusers/models/embeddings.py:595 in apply_rotary_emb V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.arange, accessed_by=GetAttrGuardAccessor(arange) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.arange, 139859471382304) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.float16, accessed_by=GetAttrGuardAccessor(float16) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.float16 == torch.float16 # if self.weight.dtype in [torch.float16, torch.bfloat16]: # diffusers/src/diffusers/models/normalization.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.float32, accessed_by=GetAttrGuardAccessor(float32) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.float32 == torch.float32 # start=0, end=half_dim, dtype=torch.float32, device=timesteps.device # diffusers/src/diffusers/models/embeddings.py:58 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.float64, accessed_by=GetAttrGuardAccessor(float64) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.float64 == torch.float64 # freqs_dtype = torch.float32 if is_mps else torch.float64 # diffusers/src/diffusers/models/embeddings.py:627 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.bfloat16, accessed_by=GetAttrGuardAccessor(bfloat16) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.bfloat16 == torch.bfloat16 # if self.weight.dtype in [torch.float16, torch.bfloat16]: # diffusers/src/diffusers/models/normalization.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.from_numpy, accessed_by=GetAttrGuardAccessor(from_numpy) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: 
___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.from_numpy, 139859471373712) # t = torch.from_numpy(pos).to(freqs.device) # type: ignore # [S] # diffusers/src/diffusers/models/embeddings.py:551 in get_1d_rotary_pos_embed V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb, accessed_by=GetAttrGuardAccessor(apply_rotary_emb) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__code__, 139855833426912) # from .embeddings import apply_rotary_emb # diffusers/src/diffusers/models/attention_processor.py:1760 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[0], 7629952) # if use_real: # diffusers/src/diffusers/models/embeddings.py:586 in apply_rotary_emb V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[1], accessed_by=GetItemGuardAccessor(1) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[1] == -1 # if use_real_unbind_dim == -1: # diffusers/src/diffusers/models/embeddings.py:592 in apply_rotary_emb V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding, accessed_by=GetAttrGuardAccessor(get_timestep_embedding) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__code__, 139855833348912) # t_emb = get_timestep_embedding( # diffusers/src/diffusers/models/embeddings.py:696 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__defaults__[3], accessed_by=GetItemGuardAccessor(3) V0828 
05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__defaults__[3] == 10000 # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed, accessed_by=GetAttrGuardAccessor(get_1d_rotary_pos_embed) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__code__, 139855833425856) # cos, sin = get_1d_rotary_pos_embed( # diffusers/src/diffusers/models/embeddings.py:629 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed, accessed_by=FuncDefaultsGuardAccessor V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[0] == 10000.0 # theta = theta * ntk_factor # diffusers/src/diffusers/models/embeddings.py:549 in get_1d_rotary_pos_embed V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[2], accessed_by=GetItemGuardAccessor(2) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[2] == 1.0 # freqs = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=freqs_dtype)[: (dim // 2)] / dim)) / linear_factor # [D/2] # diffusers/src/diffusers/models/embeddings.py:550 in get_1d_rotary_pos_embed V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[3], accessed_by=GetItemGuardAccessor(3) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[3] == 1.0 # theta = theta * ntk_factor # diffusers/src/diffusers/models/embeddings.py:549 in get_1d_rotary_pos_embed V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'], 139856042419520) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F, 139856042421440) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_activations'].F # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_dropout'].F # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_activation'].F # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_normalization'].F # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_attention_processor'].F # hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) # diffusers/src/diffusers/models/attention_processor.py:1765 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.gelu, accessed_by=GetAttrGuardAccessor(gelu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.gelu, 139856049958000) # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.silu, accessed_by=GetAttrGuardAccessor(silu) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.silu, 139856039794000) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.linear, accessed_by=GetAttrGuardAccessor(linear) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.linear, 139856049959216) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.dropout, accessed_by=GetAttrGuardAccessor(dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.dropout, 139856039777904) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.layer_norm, accessed_by=GetAttrGuardAccessor(layer_norm) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.layer_norm, 139856039795440) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.scaled_dot_product_attention, accessed_by=GetAttrGuardAccessor(scaled_dot_product_attention) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.scaled_dot_product_attention, 139856049962576) # hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) # diffusers/src/diffusers/models/attention_processor.py:1765 in __call__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_module) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_module'], 139856045629904) # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_hooks, accessed_by=GetAttrGuardAccessor(_global_forward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_hooks, accessed_by=GetAttrGuardAccessor(_global_backward_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_pre_hooks, 
accessed_by=GetAttrGuardAccessor(_global_forward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_pre_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_pre_hooks, accessed_by=GetAttrGuardAccessor(_global_backward_pre_hooks) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_pre_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_activations'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_activations) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_activations'], 139855999915008) # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_activations'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_activations'].F # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_dropout'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_dropout) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_dropout'], 139856038787168) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_dropout'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_dropout'].F # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_normalization'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_normalization) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_normalization'], 139846585727152) # variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) # 
diffusers/src/diffusers/models/normalization.py:427 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_normalization'].torch, accessed_by=GetAttrGuardAccessor(torch) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['__import_diffusers_dot_models_dot_normalization'].torch # if self.weight.dtype in [torch.float16, torch.bfloat16]: # diffusers/src/diffusers/models/normalization.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_container'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_container) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_container'], 139856039033488) # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_container'].operator, accessed_by=GetAttrGuardAccessor(operator) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_container'].operator, 139859476171088) # idx = operator.index(idx) # nn/modules/container.py:314 in _get_abs_string_index V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_container'].operator.index, accessed_by=GetAttrGuardAccessor(index) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_container'].operator.index, 139859476211968) # idx = operator.index(idx) # nn/modules/container.py:314 in _get_abs_string_index V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_modeling_utils'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_modeling_utils) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_modeling_utils'], 139855997135104) # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_activation'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_activation) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_activation'], 139856042420880) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_activation'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: 
G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_activation'].F # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_normalization'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_normalization) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_normalization'], 139856038867568) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_normalization'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_normalization'].F # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_attention_processor) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention_processor'], 139855999915568) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].inspect, accessed_by=GetAttrGuardAccessor(inspect) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention_processor'].inspect, 139859475305312) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].inspect.signature, accessed_by=GetAttrGuardAccessor(signature) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].inspect.signature.__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention_processor'].inspect.signature.__code__, 139859474846800) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: 
G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_attention_processor'].F # hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) # diffusers/src/diffusers/models/attention_processor.py:1765 in __call__
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].torch, accessed_by=GetAttrGuardAccessor(torch)
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['__import_diffusers_dot_models_dot_attention_processor'].torch # key = torch.cat([encoder_hidden_states_key_proj, key], dim=2) # diffusers/src/diffusers/models/attention_processor.py:1756 in __call__
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | +- GuardManager: source=G['torch'], accessed_by=DictGetItemGuardAccessor(torch)
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards] | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['torch'] # if encoder_hidden_states.dtype == torch.float16: # diffusers/src/diffusers/models/transformers/transformer_flux.py:200 in forward
V0828 05:03:29.821048 1882310 torch/_dynamo/guards.py:2263] [0/1] [__guards]
V0828 05:04:31.688316 1882310 torch/_dynamo/guards.py:2796] [0/2] [__recompiles] Recompiling function forward in /fsx/sayak/diffusers/src/diffusers/models/transformers/transformer_flux.py:388
V0828 05:04:31.688316 1882310 torch/_dynamo/guards.py:2796] [0/2] [__recompiles] triggered by the following guard failure(s):
V0828 05:04:31.688316 1882310 torch/_dynamo/guards.py:2796] [0/2] [__recompiles] - 0/1: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].scaling) == 1 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward
V0828 05:04:31.688316 1882310 torch/_dynamo/guards.py:2796] [0/2] [__recompiles] - 0/0: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'], 97167728) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward
V0828 05:05:29.843203 1882310 torch/_dynamo/guards.py:2297] [0/2] [__guards] GUARDS:
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards]
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] TREE_GUARD_MANAGER:
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] +- RootGuardManager
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- DEFAULT_DEVICE: utils_device.CURRENT_DEVICE == None # _dynamo/output_graph.py:471 in init_ambient_guards
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GLOBAL_STATE: ___check_global_state()
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- TORCH_FUNCTION_MODE_STACK: ___check_torch_function_mode_stack()
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['self'], accessed_by=DictGetItemGuardAccessor(self)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- TYPE_MATCH: ___check_type_id(L['self'], 245047360) # scale_lora_layers(self, lora_scale) # diffusers/src/diffusers/models/transformers/transformer_flux.py:436 in forward
V0828
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=L['self']._buffers, accessed_by=GetAttrGuardAccessor(_buffers) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- DICT_LENGTH: not L['self']._buffers # _buffers = self.__dict__["_buffers"] # nn/modules/module.py:1908 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=L['self']._modules, accessed_by=GetAttrGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- DICT_LENGTH: len(L['self']._modules) == 8 # modules = self.__dict__["_modules"] # nn/modules/module.py:1912 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=L['self']._modules['pos_embed'], accessed_by=DictGetItemGuardAccessor(pos_embed) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['pos_embed'], 99356304) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=L['self']._modules['pos_embed'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['pos_embed'].__dict__) # image_rotary_emb = self.pos_embed(ids) # diffusers/src/diffusers/models/transformers/transformer_flux.py:469 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['pos_embed'].axes_dim, accessed_by=DictGetItemGuardAccessor(axes_dim) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['pos_embed'].axes_dim, 7569792) # self.axes_dim[i], pos[:, i], repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype # diffusers/src/diffusers/models/embeddings.py:630 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- LENGTH_CHECK: len(L['self']._modules['pos_embed'].axes_dim) == 3 # self.axes_dim[i], pos[:, i], repeat_interleave_real=True, use_real=True, freqs_dtype=freqs_dtype # diffusers/src/diffusers/models/embeddings.py:630 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['pos_embed'].axes_dim[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- EQUALS_MATCH: L['self']._modules['pos_embed'].axes_dim[0] == 16 # cos, sin = get_1d_rotary_pos_embed( # diffusers/src/diffusers/models/embeddings.py:629 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['pos_embed'].axes_dim[1], accessed_by=TupleGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- EQUALS_MATCH: L['self']._modules['pos_embed'].axes_dim[1] == 56 # cos, sin = get_1d_rotary_pos_embed( # diffusers/src/diffusers/models/embeddings.py:629 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: 
source=L['self']._modules['pos_embed'].axes_dim[2], accessed_by=TupleGetItemGuardAccessor(2)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- EQUALS_MATCH: L['self']._modules['pos_embed'].axes_dim[2] == 56 # cos, sin = get_1d_rotary_pos_embed( # diffusers/src/diffusers/models/embeddings.py:629 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['pos_embed']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['pos_embed']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['pos_embed']._backward_hooks # image_rotary_emb = self.pos_embed(ids) # diffusers/src/diffusers/models/transformers/transformer_flux.py:469 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['pos_embed']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['pos_embed']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['pos_embed']._backward_pre_hooks # image_rotary_emb = self.pos_embed(ids) # diffusers/src/diffusers/models/transformers/transformer_flux.py:469 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=L['self']._modules['time_text_embed'], accessed_by=DictGetItemGuardAccessor(time_text_embed)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed'], 99372448) # if name in modules: # nn/modules/module.py:1913 in __getattr__
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=L['self']._modules['time_text_embed'].__dict__, accessed_by=GetGenericDictGuardAccessor
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed'].__dict__) # else self.time_text_embed(timestep, guidance, pooled_projections) # diffusers/src/diffusers/models/transformers/transformer_flux.py:452 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules, accessed_by=DictGetItemGuardAccessor(_modules)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules) == 4 # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'], accessed_by=DictGetItemGuardAccessor(time_proj)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['time_proj'], 99358192) # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['time_proj'].__dict__) # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['time_proj'].__dict__) # guidance_proj = self.time_proj(guidance) # diffusers/src/diffusers/models/embeddings.py:994 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].scale, accessed_by=DictGetItemGuardAccessor(scale)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['time_text_embed']._modules['time_proj'].scale == 1 # emb = scale * emb # diffusers/src/diffusers/models/embeddings.py:66 in get_timestep_embedding
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].num_channels, accessed_by=DictGetItemGuardAccessor(num_channels)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['time_text_embed']._modules['time_proj'].num_channels == 256 # half_dim = embedding_dim // 2 # diffusers/src/diffusers/models/embeddings.py:56 in get_timestep_embedding
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['time_proj']._backward_hooks # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].flip_sin_to_cos, accessed_by=DictGetItemGuardAccessor(flip_sin_to_cos)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['time_proj'].flip_sin_to_cos, 7629952) # if flip_sin_to_cos: # diffusers/src/diffusers/models/embeddings.py:72 in get_timestep_embedding
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['time_proj']._backward_pre_hooks # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['time_proj'].downscale_freq_shift, accessed_by=DictGetItemGuardAccessor(downscale_freq_shift)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['time_text_embed']._modules['time_proj'].downscale_freq_shift == 0 # exponent = exponent / (half_dim - downscale_freq_shift) # diffusers/src/diffusers/models/embeddings.py:60 in get_timestep_embedding
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'], accessed_by=DictGetItemGuardAccessor(timestep_embedder)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder'], 99357248) # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder'].__dict__) # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules, accessed_by=DictGetItemGuardAccessor(_modules)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules) == 3 # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'], accessed_by=DictGetItemGuardAccessor(linear_1)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'], 97167728) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'].__dict__, accessed_by=GetGenericDictGuardAccessor
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1'].__dict__) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 256], stride=[256, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING: check_no_aliasing(L['img_ids'], L['txt_ids'], L['guidance'], L['timestep'], L['hidden_states'], L['pooled_projections'], L['encoder_hidden_states'], L['self']._modules['proj_out']._parameters['bias'], L['self']._modules['proj_out']._parameters['weight'], L['self']._modules['x_embedder']._parameters['bias'], L['self']._modules['x_embedder']._parameters['weight'], L['self']._modules['context_embedder']._parameters['bias'], L['self']._modules['context_embedder']._parameters['weight'], L['self']._modules['norm_out']._modules['linear']._parameters['bias'], L['self']._modules['norm_out']._modules['linear']._parameters['weight'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['bias'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['bias'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['weight'], L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['weight'], L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['bias'],
L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['bias'], L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['bias'], L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['bias'], L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['weight'], L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['weight'], L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['weight'], L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], 
L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], 
L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight']) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_1']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'], accessed_by=DictGetItemGuardAccessor(act) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'], 96881248) # if self.act is not None: # diffusers/src/diffusers/models/embeddings.py:677 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- OBJECT_ALIASING: L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'] is L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['act'] # if self.act is not None: # diffusers/src/diffusers/models/embeddings.py:677 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].__dict__) # sample = self.act(sample) # diffusers/src/diffusers/models/embeddings.py:678 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].__dict__) # sample = self.act(sample) # diffusers/src/diffusers/models/embeddings.py:678 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'], accessed_by=DictGetItemGuardAccessor(linear_2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'], 97167728) # sample = self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2'].__dict__) # sample = self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['linear_2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].post_act, accessed_by=DictGetItemGuardAccessor(post_act) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['timestep_embedder'].post_act, 7580768) # if self.post_act is not None: # diffusers/src/diffusers/models/embeddings.py:682 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['timestep_embedder']._parameters # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_hooks # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['timestep_embedder']._backward_pre_hooks # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['timestep_embedder'].__class__.forward.__defaults__[0], 7580768) # if condition is not None: # 
diffusers/src/diffusers/models/embeddings.py:673 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'], accessed_by=DictGetItemGuardAccessor(guidance_embedder) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['guidance_embedder'], 99357248) # guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['guidance_embedder'].__dict__) # guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules) == 3 # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'], accessed_by=DictGetItemGuardAccessor(linear_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'], 97167728) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1'].__dict__) # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 256], stride=[256, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_1']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['act'], accessed_by=DictGetItemGuardAccessor(act) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- OBJECT_ALIASING: L['self']._modules['time_text_embed']._modules['timestep_embedder']._modules['act'] is L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['act'] # if self.act is not None: # diffusers/src/diffusers/models/embeddings.py:677 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'], accessed_by=DictGetItemGuardAccessor(linear_2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'], 97167728) # sample = self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2'].__dict__) # sample = 
self.linear_2(sample) # diffusers/src/diffusers/models/embeddings.py:680 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['guidance_embedder']._modules['linear_2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].post_act, accessed_by=DictGetItemGuardAccessor(post_act) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['guidance_embedder'].post_act, 7580768) # if self.post_act is not None: # diffusers/src/diffusers/models/embeddings.py:682 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['guidance_embedder']._parameters # sample = self.linear_1(sample) # diffusers/src/diffusers/models/embeddings.py:675 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_hooks # guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['guidance_embedder']._backward_pre_hooks # guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:995 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['guidance_embedder'].__class__.forward.__defaults__[0], 7580768) # if condition is not None: # diffusers/src/diffusers/models/embeddings.py:673 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder'], accessed_by=DictGetItemGuardAccessor(text_embedder) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder'], 100455248) # pooled_projections = 
self.text_embedder(pooled_projection) # diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder'].__dict__) # pooled_projections = self.text_embedder(pooled_projection) # diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['text_embedder']._modules) == 3 # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'], accessed_by=DictGetItemGuardAccessor(linear_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'], 97167728) # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1'].__dict__) # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 768], stride=[768, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_1']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'], accessed_by=DictGetItemGuardAccessor(act_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'], 96881248) # hidden_states = self.act_1(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1443 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].__dict__) # hidden_states = self.act_1(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1443 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['act_1'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'], accessed_by=DictGetItemGuardAccessor(linear_2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'], 97167728) # hidden_states = self.linear_2(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1444 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2'].__dict__) # hidden_states = self.linear_2(hidden_states) # diffusers/src/diffusers/models/embeddings.py:1444 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['time_text_embed']._modules['text_embedder']._modules['linear_2']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['text_embedder']._parameters # hidden_states = self.linear_1(caption) # diffusers/src/diffusers/models/embeddings.py:1442 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['text_embedder']._backward_hooks # pooled_projections = self.text_embedder(pooled_projection) # diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._modules['text_embedder']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._modules['text_embedder']._backward_pre_hooks # pooled_projections = self.text_embedder(pooled_projection) # diffusers/src/diffusers/models/embeddings.py:999 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._parameters # timesteps_proj = self.time_proj(timestep) # diffusers/src/diffusers/models/embeddings.py:991 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._backward_hooks # else self.time_text_embed(timestep, guidance, pooled_projections) # diffusers/src/diffusers/models/transformers/transformer_flux.py:452 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['time_text_embed']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['time_text_embed']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['time_text_embed']._backward_pre_hooks # else self.time_text_embed(timestep, guidance, pooled_projections) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:452 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=L['self']._modules['context_embedder'], accessed_by=DictGetItemGuardAccessor(context_embedder) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['context_embedder'], 97167728) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=L['self']._modules['context_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['context_embedder'].__dict__) # encoder_hidden_states = self.context_embedder(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:454 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['context_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['context_embedder']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['context_embedder']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['context_embedder']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 4096], stride=[4096, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['context_embedder']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['context_embedder']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=L['self']._modules['x_embedder'], accessed_by=DictGetItemGuardAccessor(x_embedder) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['x_embedder'], 97167728) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: 
source=L['self']._modules['x_embedder'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['x_embedder'].__dict__) # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['x_embedder']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['x_embedder']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['x_embedder']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['x_embedder']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 64], stride=[64, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['x_embedder']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['x_embedder']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=L['self']._modules['transformer_blocks'], accessed_by=DictGetItemGuardAccessor(transformer_blocks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks'], 96863792) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=L['self']._modules['transformer_blocks'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # 
peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
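
For readers following the guard tree above: many of the guards on norm1_context.linear (and, below, on attn.to_q) trace the PEFT LoRA forward path quoted in the log's source comments (peft/tuners/lora/layer.py: base_layer call, per-adapter dropout/lora_A/lora_B lookup, and the scaled low-rank update). The following is a minimal standalone sketch of that pattern, assuming a simplified single-adapter linear layer; the class name SketchLoraLinear, the rank/scaling/dropout constructor arguments, and the plain active_adapters / disable_adapters attributes are illustrative assumptions, not the actual PEFT API. Shapes mirror the TENSOR_MATCH guards above (base weight [18432, 3072], lora_A [16, 3072], lora_B [18432, 16], scaling 1.0).

# Illustrative sketch only -- not the peft implementation.
import torch
import torch.nn as nn

class SketchLoraLinear(nn.Module):
    def __init__(self, in_features, out_features, rank=16, scaling=1.0, dropout=0.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)  # pretrained (frozen) projection
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Dropout(dropout)})
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, rank, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(rank, out_features, bias=False)})
        self.scaling = {"default_0": scaling}
        self.active_adapters = ["default_0"]   # simplified; real PEFT derives this from _active_adapter
        self.disable_adapters = False

    def forward(self, x):
        # result = self.base_layer(x, *args, **kwargs)
        result = self.base_layer(x)
        if self.disable_adapters:
            return result
        # for active_adapter in self.active_adapters:
        for active_adapter in self.active_adapters:
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x_cast = x.to(lora_A.weight.dtype)  # x = x.to(lora_A.weight.dtype)
            # result = result + lora_B(lora_A(dropout(x))) * scaling
            result = result + lora_B(lora_A(dropout(x_cast))) * scaling
        return result

if __name__ == "__main__":
    layer = SketchLoraLinear(3072, 18432, rank=16)  # dimensions matching the guarded weights above
    out = layer(torch.randn(2, 3072))
    print(out.shape)  # torch.Size([2, 18432])

Because the guards pin the adapter keys, scaling values, and the dtype/shape/stride of every LoRA and base weight (the TENSOR_MATCH, EQUALS_MATCH, and DICT_LENGTH entries above), changing the active adapter set, LoRA rank, or parameter dtypes after compilation would invalidate these guards and trigger a recompile.
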
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps, 
accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, 
*args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], 244529984) # key = 
attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__) # value = 
attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor 
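
Note on this stretch of the guard tree: the guards around attn.to_v (and the neighbouring to_k / add_k_proj projections) pin down every Python-level attribute that the PEFT LoRA wrapper reads on its way through forward: the _active_adapter list equal to ['default_0'], the two-entry scaling / use_dora / lora_dropout dicts keyed by 'default_0' and 'default_1', the empty merged_adapters list, the frozen bfloat16 3072x3072 base_layer weight, and the rank-16 lora_A ([16, 3072]) and lora_B ([3072, 16]) weights checked by TENSOR_MATCH. The ID_MATCH guards on use_dora['default_0'], _disable_adapters and the LoRA biases are identity checks against what are presumably the False and None singletons. The sketch below is a minimal stand-in, not peft's actual lora.Linear; the class name, constructor arguments and the plain attribute access (the real class goes through properties) are illustrative, but the attribute names mirror the guard sources above, which is why each of them shows up as a guard.

    import torch
    import torch.nn as nn

    class SimplifiedLoraLinear(nn.Module):
        """Hypothetical, simplified stand-in for a PEFT LoRA linear layer.

        Only the attributes read by the guarded forward path are modelled:
        base_layer, lora_A/lora_B ModuleDicts keyed by adapter name,
        per-adapter dropout, scaling, use_dora, merged_adapters,
        disable_adapters and the list of active adapters.
        """

        def __init__(self, in_features=3072, out_features=3072, r=16,
                     adapters=("default_0", "default_1")):
            super().__init__()
            # Frozen bf16 base weight/bias -> the TENSOR_MATCH guards above.
            self.base_layer = nn.Linear(in_features, out_features, bias=True)
            # Rank-r adapters, one entry per adapter name -> DICT_LENGTH == 2
            # and EQUALS_MATCH on the ModuleDict keys.
            self.lora_A = nn.ModuleDict(
                {a: nn.Linear(in_features, r, bias=False) for a in adapters})
            self.lora_B = nn.ModuleDict(
                {a: nn.Linear(r, out_features, bias=False) for a in adapters})
            # Identity stands in for whatever dropout was configured.
            self.lora_dropout = nn.ModuleDict({a: nn.Identity() for a in adapters})
            self.scaling = {a: 1.0 for a in adapters}        # scaling['default_0'] == 1.0
            self.use_dora = {a: False for a in adapters}     # ID_MATCH against False
            self.merged_adapters = []                        # empty -> not merged
            self.disable_adapters = False
            self._active_adapter = ["default_0"]             # only default_0 is active

        def forward(self, x):
            result = self.base_layer(x)
            if self.disable_adapters or self.merged_adapters:
                return result
            for adapter in self._active_adapter:
                if adapter not in self.lora_A.keys():
                    continue
                lora_A = self.lora_A[adapter]
                lora_B = self.lora_B[adapter]
                dropout = self.lora_dropout[adapter]
                scaling = self.scaling[adapter]
                x_cast = x.to(lora_A.weight.dtype)
                if not self.use_dora[adapter]:
                    result = result + lora_B(lora_A(dropout(x_cast))) * scaling
            return result

Compiling such a module (for example, torch.compile(SimplifiedLoraLinear())(torch.randn(1, 3072)) with TORCH_LOGS="guards") should emit a much smaller guard tree of the same shape. The practical consequence of these guards is that any change to the guarded state on the real model, such as activating 'default_1', merging an adapter, or replacing a LoRA weight with a different dtype or shape, invalidates frame [0/2] and forces Dynamo to recompile it.
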
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # 
nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
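
The EQUALS_MATCH above fixes scaling['default_0'] at 1.0, and the lora_A/lora_B TENSOR_MATCH entries in this block show rank-16 factors ([16, 3072] and [3072, 16]) of the 3072-wide projections. In peft, standard LoRA scaling is lora_alpha / r, so these numbers are consistent with a rank-16 adapter whose lora_alpha equals its rank. A hypothetical config of that shape (not necessarily the one used for this run; the target_modules list is only a guess from the layer names appearing in the guards) would look like:

    from peft import LoraConfig

    # Hypothetical adapter config matching the guarded numbers:
    # r=16 gives the [16, 3072] / [3072, 16] LoRA weights,
    # lora_alpha=16 gives scaling = lora_alpha / r = 1.0,
    # lora_dropout=0.0 is consistent with no dropout probability being guarded here.
    config = LoraConfig(
        r=16,
        lora_alpha=16,
        lora_dropout=0.0,
        target_modules=["add_q_proj", "to_out.0", "to_add_out"],
    )
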
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + 
lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
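
A large fraction of the guards in this stretch are not about weights at all but about module bookkeeping: empty _backward_hooks / _backward_pre_hooks dicts (DICT_LENGTH: not ...), DictSubclassGuardManager entries for _forward_hooks / _forward_pre_hooks, empty _parameters on the wrapper layer, and so on. These let Dynamo assume no hooks fire inside the compiled region. As a rough, hypothetical illustration (a toy Linear, not the Flux transformer in this log), mutating one of those hook dicts after compilation would be expected to fail the guard on the next call and trigger a recompile:

    import torch

    lin = torch.nn.Linear(8, 8)
    compiled = torch.compile(lin)

    x = torch.randn(4, 8)
    compiled(x)  # first call: traces and installs guards while the hook dicts are empty

    handle = lin.register_full_backward_hook(lambda mod, grad_in, grad_out: None)
    compiled(x)  # _backward_hooks is no longer empty, so the empty-dict guard should fail and recompile
    handle.remove()
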
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
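
Almost every guard comment here cites the same few source lines of peft's LoRA forward (peft/tuners/lora/layer.py, roughly lines 488-509): the base_layer call first, then result + lora_B(lora_A(dropout(x))) * scaling per active adapter. Reassembling only the lines quoted in these guards gives the abridged, self-contained sketch below. It is a paraphrase for orientation, not the verbatim peft source: dropout is replaced by Identity (consistent with no dropout probability being guarded), the DoRA and merge/unmerge branches are elided, and shapes/dtypes are copied from the TENSOR_MATCH entries (3072-wide bfloat16 projections, rank-16 adapters, only default_0 active):

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Abridged stand-in for the peft lora.Linear.forward path quoted in the
        # guard comments above (peft/tuners/lora/layer.py:488-509).
        def __init__(self, features=3072, rank=16, adapters=("default_0", "default_1")):
            super().__init__()
            self.base_layer = nn.Linear(features, features, dtype=torch.bfloat16)
            self.lora_A = nn.ModuleDict(
                {a: nn.Linear(features, rank, bias=False, dtype=torch.bfloat16) for a in adapters}
            )
            self.lora_B = nn.ModuleDict(
                {a: nn.Linear(rank, features, bias=False, dtype=torch.bfloat16) for a in adapters}
            )
            self.lora_dropout = nn.ModuleDict({a: nn.Identity() for a in adapters})
            self.scaling = {a: 1.0 for a in adapters}        # EQUALS_MATCH ... == 1.0
            self.use_dora = {a: False for a in adapters}     # layer.py:508 branch
            self.disable_adapters = False                    # layer.py:488
            self.active_adapters = ["default_0"]             # _active_adapter guard: ['default_0']

        def forward(self, x):
            if self.disable_adapters:                        # layer.py:488 (unmerge path elided)
                return self.base_layer(x)
            result = self.base_layer(x)                      # layer.py:497
            for active_adapter in self.active_adapters:      # layer.py:499
                if active_adapter not in self.lora_A.keys(): # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]         # layer.py:503
                dropout = self.lora_dropout[active_adapter]  # layer.py:504
                scaling = self.scaling[active_adapter]       # layer.py:505
                x = x.to(lora_A.weight.dtype)                # layer.py:506
                if not self.use_dora[active_adapter]:        # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result

    out = LoraLinearSketch()(torch.randn(2, 3072, dtype=torch.bfloat16))

Because every one of those dict lookups and attribute reads appears in the guard tree, changing any of them after compilation (activating default_1, enabling DoRA, editing a scaling value) would be expected to invalidate this compiled graph.
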
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], 
stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['attn'].processor, 139846062622704) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: 
GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ 
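[annotation] The guard subtree above and below specializes on the LoRA-wrapped FeedForward of transformer_blocks.0: net['0'] is a GELU(approximate='tanh') wrapping a 3072 -> 12288 proj, net['1'] is Dropout(p=0.0), and net['2'] is the 12288 -> 3072 down-projection, each carrying two PEFT adapters ('default_0' active with scaling 1.0, 'default_1' present but inactive), rank-16 lora_A/lora_B weights in bfloat16, frozen base weights, use_dora False and no merged adapters. The sketch below is a minimal, illustrative re-creation of the forward path these guards pin down, following the source lines quoted in the guard comments (peft/tuners/lora/layer.py:497-509, diffusers activations.py:83-88, attention.py:1165-1166); LoraLinearSketch and ff_sketch are made-up names for this sketch, not the actual peft or diffusers classes, and the lora_dropout module is assumed to act as an identity since its real type is not recoverable from the dump.

import torch
import torch.nn as nn
import torch.nn.functional as F

class LoraLinearSketch(nn.Module):
    # Stand-in for a PEFT LoRA Linear: frozen base_layer plus rank-16 adapter.
    # Shapes below mirror the TENSOR_MATCH guards (bfloat16, rank r=16).
    def __init__(self, in_features, out_features, r=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
        for p in self.base_layer.parameters():
            p.requires_grad_(False)            # guards show requires_grad=False on base weights
        self.lora_A = nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)
        self.lora_B = nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)
        self.dropout = nn.Identity()           # lora_dropout['default_0']; identity assumed here
        self.scaling = scaling                 # scaling['default_0'] == 1.0 per EQUALS_MATCH

    def forward(self, x):
        # Mirrors the quoted lines: result = self.base_layer(x, ...);
        # x = x.to(lora_A.weight.dtype); result += lora_B(lora_A(dropout(x))) * scaling
        result = self.base_layer(x)
        x = x.to(self.lora_A.weight.dtype)
        return result + self.lora_B(self.lora_A(self.dropout(x))) * self.scaling

# net['0']: GELU(tanh) around the 3072 -> 12288 proj (lora_A [16, 3072], lora_B [12288, 16])
# net['1']: Dropout(p=0.0)
# net['2']: 12288 -> 3072 down-projection (lora_A [16, 12288], lora_B [3072, 16])
proj = LoraLinearSketch(3072, 12288)
down = LoraLinearSketch(12288, 3072)
drop = nn.Dropout(p=0.0)

def ff_sketch(hidden_states):
    # "for module in self.net: hidden_states = module(hidden_states)"
    hidden_states = F.gelu(proj(hidden_states), approximate="tanh")
    hidden_states = drop(hidden_states)
    return down(hidden_states)

The tensor shapes in the sketch are taken directly from the TENSOR_MATCH entries in this subtree (proj base weight [12288, 3072] with bias [12288], down base weight [3072, 12288] with bias [3072], adapter matrices of rank 16); everything else is a hedged reconstruction of the code paths the guard comments reference, not the libraries' own implementations.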
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_hooks # ff_output = 
self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._modules['ff_context']._backward_pre_hooks # 
context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['0']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters, 
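[editor's note] The block of guards above specializes on the control flow of the PEFT LoRA linear wrapper whose source lines are quoted in the guard comments (peft/tuners/lora/layer.py:488-509): adapters not disabled, nothing merged, a single active adapter, use_dora False, so the traced branch is the plain base projection plus `lora_B(lora_A(dropout(x))) * scaling`. The sketch below paraphrases only those quoted lines; it is an illustrative stand-in, not the real `peft.tuners.lora.layer.Linear`, and the class/attribute names are hypothetical.

```python
# Minimal sketch of the LoRA linear path these guards pin down
# (paraphrasing the quoted "result = result + lora_B(lora_A(dropout(x))) * scaling").
# Illustrative only -- NOT the PEFT implementation.
import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    def __init__(self, base: nn.Linear, r: int = 16, scaling: float = 1.0, p: float = 0.0):
        super().__init__()
        self.base_layer = base                      # frozen base projection
        self.lora_A = nn.Linear(base.in_features, r, bias=False)
        self.lora_B = nn.Linear(r, base.out_features, bias=False)
        self.lora_dropout = nn.Dropout(p) if p > 0 else nn.Identity()
        self.scaling = scaling
        self.disable_adapters = False               # guarded via _disable_adapters
        self.merged = False                         # guarded via merged_adapters == []
        self.use_dora = False                       # guarded via use_dora['default_0']

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = self.base_layer(x)                 # layer.py:497 in the guard comments
        if self.disable_adapters or self.merged:
            return result
        x = x.to(self.lora_A.weight.dtype)          # layer.py:506
        if not self.use_dora:                       # layer.py:508
            result = result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling
        return result
```

With the shapes guarded here, `LoraLinearSketch(nn.Linear(3072, 18432))` would mirror the norm1.linear wrapper; every attribute read in that forward (active adapter key, use_dora flag, scaling value, merged/disabled state) shows up as its own guard above.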
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
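[editor's note] The norm1.norm guards above fully determine the inner normalization: `normalized_shape == (3072,)`, `eps == 1e-06`, and both `weight` and `bias` ID_MATCH the same fixed object id, which is consistent with them being `None` (i.e. no affine parameters). A small check of that configuration, assuming the None interpretation:

```python
# LayerNorm configured the way the guards describe it:
# normalized_shape (3072,), eps 1e-06, weight/bias both None
# (the ID_MATCH guards compare against one fixed object id,
# consistent with Python's None singleton). Illustrative only.
import torch
import torch.nn as nn

norm = nn.LayerNorm(3072, eps=1e-6, elementwise_affine=False)
assert norm.weight is None and norm.bias is None
assert norm.normalized_shape == (3072,)

x = torch.randn(2, 4, 3072)
print(norm(x).shape)  # torch.Size([2, 4, 3072])
```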
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'], 244529984) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
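[editor's note] The norm1_context guards follow the adaLN-style modulation quoted from diffusers normalization.py:137/139 (`emb = self.linear(self.silu(emb))`, then `self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]`), with the base projection guarded at weight [18432, 3072] and bias [18432], i.e. 6 x 3072 outputs for the shift/scale/gate pairs returned at transformer_flux.py:165/167. The sketch below reproduces just that pattern under those dimensions; `AdaLNZeroSketch` is a hypothetical name, not the diffusers class.

```python
# Sketch of the adaLN-Zero style modulation the guard comments point at:
# emb -> SiLU -> Linear(3072 -> 6*3072), chunk into shift/scale/gate for
# attention and MLP, then modulate a parameter-free LayerNorm.
# Hypothetical module, for illustration only.
import torch
import torch.nn as nn

class AdaLNZeroSketch(nn.Module):
    def __init__(self, dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(dim, 6 * dim, bias=True)   # weight [18432, 3072], bias [18432]
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)

    def forward(self, x: torch.Tensor, emb: torch.Tensor):
        emb = self.linear(self.silu(emb))                   # normalization.py:137
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # normalization.py:139
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp

mod = AdaLNZeroSketch()
x, *rest = mod(torch.randn(1, 77, 3072), torch.randn(1, 3072))
print(x.shape)  # torch.Size([1, 77, 3072])
```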
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
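[editor's note] Taken together, the TENSOR_MATCH guards in this stretch give the full picture of one adapter on this projection: base weight [18432, 3072] in bfloat16 with requires_grad=False, lora_A weight [16, 3072] and lora_B weight [18432, 16] with requires_grad=True, and no LoRA biases (the bias entries ID_MATCH the None-like singleton). That is a rank-16 LoRA where only the adapter matrices train. A quick bookkeeping check under exactly those shapes; the module wiring is an illustrative stand-in, not the PEFT internals:

```python
# Parameter bookkeeping for the shapes pinned by the TENSOR_MATCH guards:
# base Linear 3072 -> 18432 (frozen, bf16), rank-16 LoRA adapters trainable.
import torch
import torch.nn as nn

base = nn.Linear(3072, 18432, bias=True).to(torch.bfloat16).requires_grad_(False)
lora_A = nn.Linear(3072, 16, bias=False).to(torch.bfloat16)   # weight [16, 3072]
lora_B = nn.Linear(16, 18432, bias=False).to(torch.bfloat16)  # weight [18432, 16]

trainable = sum(p.numel() for p in (*lora_A.parameters(), *lora_B.parameters()))
frozen = sum(p.numel() for p in base.parameters())
print(trainable, frozen)  # 344064 vs 56641536 -> roughly 0.6% of the base layer
```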
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
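[editor's note] Every entry in this tree is a check that torch.compile re-evaluates on entry to the compiled region before reusing the cached graph: TYPE_MATCH/ID_MATCH compare type and object identity, EQUALS_MATCH compares values, DICT_LENGTH/LENGTH_CHECK pin container sizes, TENSOR_MATCH pins dtype/device/shape/stride/requires_grad, and NO_TENSOR_ALIASING asserts the guarded tensors are distinct objects; the [0/2] prefix marks a recompilation of the same frame. A minimal, self-contained way to produce this kind of output for your own module is sketched below; the TORCH_LOGS artifact names are the standard torch._logging ones as I understand them, and the toy module is purely illustrative.

```python
# Minimal repro for inspecting guards and recompiles on a toy module.
# Run with:  TORCH_LOGS="guards,recompiles" python this_file.py
# (the same logging channels that produced the dump above).
import torch
import torch.nn as nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(8, 8)

    def forward(self, x):
        return self.linear(x).relu()

m = torch.compile(Toy())
m(torch.randn(4, 8))   # first call: compile and emit a GUARDS tree
m(torch.randn(4, 8))   # same guards pass -> cached graph is reused
m(torch.randn(6, 8))   # a shape change typically fails a guard -> recompile logged
```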
source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters) == 1 # if 
self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 
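The norm_q / norm_k guards above describe a per-head RMSNorm: eps == 1e-06 and a bfloat16 weight of size [128], which matches head_dim = inner_dim // attn.heads = 3072 // 24 = 128 given the heads == 24 guard and the 3072x3072 base projection weights below. A minimal sketch of the normalization the quoted comments (diffusers normalization.py:428 and :430) point at; the float32 upcast is an assumption, not something the guards assert:

    import torch
    import torch.nn as nn

    # Sketch of the QK RMSNorm guarded as attn.norm_q / attn.norm_k.
    class RMSNormSketch(nn.Module):
        def __init__(self, dim: int = 128, eps: float = 1e-6):
            super().__init__()
            self.eps = eps
            self.weight = nn.Parameter(torch.ones(dim))

        def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
            input_dtype = hidden_states.dtype
            hidden_states = hidden_states.to(torch.float32)
            variance = hidden_states.pow(2).mean(-1, keepdim=True)
            hidden_states = hidden_states * torch.rsqrt(variance + self.eps)  # cf. normalization.py:428
            hidden_states = hidden_states.to(input_dtype)
            if self.weight is not None:                                       # cf. normalization.py:430
                hidden_states = hidden_states * self.weight
            return hidden_states
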
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if 
active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
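The to_q guards above (and the to_k guards that follow) walk the PEFT LoRA Linear forward quoted in the comments (peft/tuners/lora/layer.py:488-509): adapters not disabled and not merged, two registered adapters ('default_0', 'default_1') with only 'default_0' active at scaling 1.0, rank-16 A/B matrices ([16, 3072] and [3072, 16]) around a 3072x3072 base layer, and use_dora False. A condensed sketch of that path under exactly these guarded conditions; nn.Identity stands in for the guarded dropout module and bias=False on the A/B layers mirrors the ID_MATCH-on-None bias guards (both assumptions), so this is a paraphrase of the quoted lines rather than the full PEFT implementation:

    import torch
    import torch.nn as nn

    # Condensed sketch of the peft lora.Linear forward traced by the to_q / to_k guards.
    class LoraLinearSketch(nn.Module):
        def __init__(self, in_features=3072, out_features=3072, r=16, scaling=1.0):
            super().__init__()
            self.base_layer = nn.Linear(in_features, out_features, bias=True)
            self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
            self.scaling = {"default_0": scaling}
            self.use_dora = {"default_0": False}
            self.active_adapters = ["default_0"]
            self.disable_adapters = False

        def forward(self, x, *args, **kwargs):
            if self.disable_adapters:                          # cf. layer.py:488
                return self.base_layer(x, *args, **kwargs)
            result = self.base_layer(x, *args, **kwargs)       # cf. layer.py:497
            for active_adapter in self.active_adapters:        # cf. layer.py:499
                if active_adapter not in self.lora_A.keys():   # cf. layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]           # cf. layer.py:503
                dropout = self.lora_dropout[active_adapter]    # cf. layer.py:504
                scaling = self.scaling[active_adapter]         # cf. layer.py:505
                x = x.to(lora_A.weight.dtype)                  # cf. layer.py:506
                if not self.use_dora[active_adapter]:          # cf. layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # cf. layer.py:509
            return result
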
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in 
active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B 
= self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['attn'].processor, 139846062624576) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
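The guards in this stretch pin down the LoRA-wrapped proj linear inside transformer_blocks.1.ff.net[0]: the frozen bfloat16 base weight of shape [12288, 3072] and its [12288] bias just above, and, in the entries that follow, the rank-16 lora_A/lora_B weights for the active adapter 'default_0', the adapter bookkeeping dicts (scaling['default_0'] == 1.0, use_dora['default_0'] is False, _active_adapter == ['default_0']), and the hook/parameter dicts Dynamo checks before inlining the forward. The sketch below reconstructs that code path from the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509 and nn/modules/linear.py:125), with shapes read off the TENSOR_MATCH entries; the class name LoraLinearSketch, its constructor defaults, the nn.Identity stand-in for lora_dropout['default_0'], and the (1, 4096, 3072) example input are illustrative assumptions, not the actual peft implementation.

import torch
import torch.nn as nn


# Minimal sketch of the peft LoRA linear whose guards are dumped around this
# point (transformer_blocks.1.ff.net[0].proj). Shapes and flags are taken from
# the TENSOR_MATCH / EQUALS_MATCH / ID_MATCH guards: base Linear(3072 -> 12288)
# in bfloat16 with requires_grad=False, rank-16 lora_A/lora_B for adapter
# 'default_0' with no bias, scaling 1.0, use_dora False. Names and defaults
# here are illustrative, not the real peft classes.
class LoraLinearSketch(nn.Module):
    def __init__(self, in_features=3072, out_features=12288, r=16,
                 adapter="default_0", dtype=torch.bfloat16):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, dtype=dtype)
        self.base_layer.requires_grad_(False)  # frozen base projection
        self.lora_A = nn.ModuleDict(
            {adapter: nn.Linear(in_features, r, bias=False, dtype=dtype)})
        self.lora_B = nn.ModuleDict(
            {adapter: nn.Linear(r, out_features, bias=False, dtype=dtype)})
        # The guards only type-check lora_dropout['default_0'], so a no-op
        # stand-in is used here.
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}        # scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}     # use_dora['default_0'] is False
        self.active_adapters = [adapter]     # _active_adapter == ['default_0']

    def forward(self, x):
        # Same control flow as the peft/tuners/lora/layer.py lines quoted in
        # the guard comments: frozen base projection plus a scaled low-rank
        # update per active adapter.
        result = self.base_layer(x)
        for active_adapter in self.active_adapters:
            if active_adapter not in self.lora_A.keys():
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:
                result = result + lora_B(lora_A(dropout(x))) * scaling
        return result


if __name__ == "__main__":
    proj = LoraLinearSketch()
    hidden_states = torch.randn(1, 4096, 3072, dtype=torch.bfloat16)
    print(proj(hidden_states).shape)  # torch.Size([1, 4096, 12288])

Every value listed in these guards is a recompile trigger: switching the active adapter, changing a scaling factor, merging adapters (merged_adapters becoming non-empty), or enabling DoRA would fail the corresponding guard and force another recompile of this frame. Dumps like this one come from the guards logging artifact (e.g. TORCH_LOGS="guards" or torch._logging.set_logs(guards=True)).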
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return 
iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: 
GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['1']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules) == 1 # for module in 
self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, 
self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, 
*args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
+- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['1']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, 
size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | 
| | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._active_adapter) == 1 # if 
isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].eps, 
accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape[0], 
accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
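Note on the norm1_context guards above: they trace an AdaLayerNorm-style modulation path, emb = self.linear(self.silu(emb)) (diffusers normalization.py:137) followed by x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] (normalization.py:139), with a LayerNorm over normalized_shape (3072,), eps 1e-06, and weight/bias guarded against a fixed object id (consistent with elementwise_affine=False). Below is a minimal sketch of that modulation, assuming the usual 6-way chunk of the projected embedding (the 18432-wide lora_B output guarded earlier is consistent with 6 * 3072); the chunk layout and module names are inferred from the guard comments, not copied from the diffusers source.

    import torch
    import torch.nn as nn

    hidden = 3072  # matches normalized_shape[0] == 3072 in the guards above

    silu = nn.SiLU()
    linear = nn.Linear(hidden, 6 * hidden)  # 6 * 3072 = 18432 outputs (assumed split below)
    norm = nn.LayerNorm(hidden, eps=1e-6, elementwise_affine=False)  # weight/bias are None, as guarded

    def ada_layer_norm_zero(x, emb):
        # emb = self.linear(self.silu(emb))
        emb = linear(silu(emb))
        # assumed 6-way split: shift/scale/gate for attention and for the MLP
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        x = norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp

    x = torch.randn(1, 512, hidden)
    emb = torch.randn(1, hidden)
    # five return values, matching the unpacking quoted at transformer_flux.py:167
    norm_x, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = ada_layer_norm_zero(x, emb)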
[__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
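Note on the norm_q / norm_k guards above: they describe an RMS-style normalization, hidden_states * torch.rsqrt(variance + self.eps) (normalization.py:428), with eps 1e-06 and a learnable bfloat16 weight of size [128], i.e. one scale per head dimension (head_dim = inner_dim // attn.heads = 3072 // 24 = 128 per attention_processor.py:1721). The sketch below reconstructs that per-head normalization from the guard comments, assuming the variance is the mean of squares over the last axis computed in float32; it is an illustration, not the diffusers RMSNorm implementation verbatim.

    import torch

    heads, inner_dim = 24, 3072
    head_dim = inner_dim // heads  # 128, matching the guarded weight size
    eps = 1e-6
    weight = torch.ones(head_dim, dtype=torch.bfloat16)  # guarded as bfloat16, size [128]

    def rms_norm(hidden_states, weight, eps):
        input_dtype = hidden_states.dtype
        # variance over the head_dim axis, in float32 for stability (assumed)
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        # hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
        hidden_states = hidden_states.to(torch.float32) * torch.rsqrt(variance + eps)
        return hidden_states.to(input_dtype) * weight

    query = torch.randn(1, heads, 512, head_dim, dtype=torch.bfloat16)
    query = rms_norm(query, weight, eps)  # query = attn.norm_q(query)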
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- 
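Note on the TENSOR_MATCH entries above: they pin the to_q base layer to a bfloat16 CUDA parameter of size [3072, 3072] (and its bias to [3072]); if dtype, device, requires_grad, size, or stride changes, the guard fails and this frame recompiles. Below is a rough conceptual illustration of what such a check verifies, using a hypothetical check_tensor helper on CPU; the real guard is evaluated inside the guard manager, so this is only a sketch, not the actual mechanism.

    import torch

    def check_tensor(t, dtype, device, requires_grad, size, stride):
        # conceptual equivalent of a TENSOR_MATCH guard: all static tensor properties must match
        return (
            t.dtype == dtype
            and t.device == torch.device(device)
            and t.requires_grad == requires_grad
            and list(t.size()) == size
            and list(t.stride()) == stride
        )

    w = torch.nn.Parameter(torch.zeros(3072, 3072, dtype=torch.bfloat16), requires_grad=False)
    ok = check_tensor(w, torch.bfloat16, "cpu", False, [3072, 3072], [3072, 1])  # device=0 in the log means cuda:0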
GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) 
# lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
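Note on the to_q guards above: taken together (base_layer, rank-16 lora_A [16, 3072] and lora_B [3072, 16], lora_dropout, scaling['default_0'] == 1.0, use_dora False, _active_adapter == ['default_0'], merged_adapters empty, disable_adapters False), they trace the non-merged LoRA forward path quoted in the guard comments (peft/tuners/lora/layer.py:488-509). The sketch below assembles those quoted lines with the guarded shapes; the surrounding names are simplified stand-ins (nn.Identity stands in for the guarded dropout module), not the PEFT source.

    import torch
    import torch.nn as nn

    in_features = out_features = 3072
    rank = 16  # matches the guarded lora_A [16, 3072] and lora_B [3072, 16] weights

    base_layer = nn.Linear(in_features, out_features, bias=True)
    lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, rank, bias=False)})
    lora_B = nn.ModuleDict({"default_0": nn.Linear(rank, out_features, bias=False)})
    lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
    scaling = {"default_0": 1.0}        # EQUALS_MATCH: scaling['default_0'] == 1.0
    use_dora = {"default_0": False}     # guarded False, so the plain LoRA branch is taken
    active_adapters = ["default_0"]     # _active_adapter[0] == 'default_0'

    def lora_linear_forward(x):
        # result = self.base_layer(x, *args, **kwargs)
        result = base_layer(x)
        # for active_adapter in self.active_adapters:
        for adapter in active_adapters:
            if adapter not in lora_A.keys():
                continue
            A, B = lora_A[adapter], lora_B[adapter]
            dropout, scale = lora_dropout[adapter], scaling[adapter]
            x = x.to(A.weight.dtype)
            if not use_dora[adapter]:
                # result = result + lora_B(lora_A(dropout(x))) * scaling
                result = result + B(A(dropout(x))) * scale
        return result

    hidden_states = torch.randn(1, 512, in_features)
    query = lora_linear_forward(hidden_states)  # query = attn.to_q(hidden_states)

Because the scaling constant, the active adapter list, and the use_dora flags are captured by EQUALS_MATCH / ID_MATCH guards, changing any of them at runtime (for example switching adapters or rescaling the LoRA) invalidates these guards and triggers a recompile of this frame.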
| | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, 
**kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'], 244529984) # 
encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # 
result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return 
bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['attn'].processor, 139846063044304) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._parameters # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].eps == 1e-06 # return 
F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return 
iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
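Taken together, the guards on `base_layer`, `lora_A`, `lora_B` and `scaling` pin down a rank-16 LoRA adapter around a bfloat16 `Linear(12288 -> 3072)`: a frozen base weight `[3072, 12288]` with bias `[3072]`, trainable `lora_A` weight `[16, 12288]` and `lora_B` weight `[3072, 16]` (their bias guards are ID_MATCHes against a fixed object, consistent with `None`), and `scaling['default_0'] == 1.0`. A hedged sketch of that computation with plain `nn.Linear` modules, following the forward lines quoted in the guard comments; the class and attribute names are illustrative, not the real peft classes, and the no-op `lora_dropout` is an assumption:

```python
import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Hedged stand-in for transformer_blocks[2].ff.net[2]; not the real peft class."""

    def __init__(self, in_features=12288, out_features=3072, r=16, scaling=1.0):
        super().__init__()
        # Frozen base layer: weight [3072, 12288] + bias [3072], bfloat16 (per TENSOR_MATCH)
        self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
        self.base_layer.requires_grad_(False)
        # Rank-16 adapter: lora_A [16, 12288], lora_B [3072, 16], trainable, no bias
        self.lora_A = nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)
        self.lora_B = nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)
        self.lora_dropout = nn.Identity()   # assumption: the guarded lora_dropout is a no-op
        self.scaling = scaling              # scaling['default_0'] == 1.0 in the log

    def forward(self, x):
        # Mirrors the lines quoted from peft/tuners/lora/layer.py:497-509
        result = self.base_layer(x)
        x = x.to(self.lora_A.weight.dtype)
        result = result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling
        return result

layer = LoraLinearSketch()
print(layer(torch.randn(2, 12288, dtype=torch.bfloat16)).shape)  # torch.Size([2, 3072])
```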
len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = 
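The remaining guards on this layer pin the adapter bookkeeping rather than tensors: `use_dora['default_0']` and `_disable_adapters` are ID_MATCHed against a fixed object (consistent with `False`), `merged_adapters` must stay empty, and `_active_adapter` must equal `['default_0']`, so the compiled code corresponds to exactly one active, unmerged, plain-LoRA adapter. A small sketch of the branch those values select, with names taken from the quoted peft sources; the `default_1` value is guarded only for presence, so its value here is an assumption:

```python
# Values as pinned by the guards above; default_1's value is an assumption.
disable_adapters = False
merged_adapters = []                       # LENGTH_CHECK: empty
active_adapters = ["default_0"]            # _active_adapter[0] == 'default_0'
use_dora = {"default_0": False, "default_1": False}

if not disable_adapters and not merged_adapters:      # layer.py:488 / tuners_utils.py:455
    for active_adapter in active_adapters:            # layer.py:499
        if not use_dora[active_adapter]:               # layer.py:508 -> plain-LoRA branch
            print(f"{active_adapter}: result += lora_B(lora_A(dropout(x))) * scaling")
```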
module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].__dict__) # 
norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | 
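The `norm2_context` guards just above reduce to a LayerNorm over the last dimension of size 3072 with `eps == 1e-06`; its `weight` and `bias` are ID_MATCHed against a fixed object, consistent with both being `None` (no elementwise affine), which is treated here as an assumption. A minimal reconstruction:

```python
import torch
import torch.nn as nn

# Hedged reconstruction of norm2_context from the guards above.
norm2_context = nn.LayerNorm(3072, eps=1e-6, elementwise_affine=False)

encoder_hidden_states = torch.randn(1, 512, 3072)   # arbitrary batch / sequence length
norm_encoder_hidden_states = norm2_context(encoder_hidden_states)
print(norm_encoder_hidden_states.shape)             # torch.Size([1, 512, 3072])
```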
+- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # 
diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- 
ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['2']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['2']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=3 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[3] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[3] == '3' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
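
The lora_A / lora_B TENSOR_MATCH guards around this point pin a rank-16 adapter wrapped around the frozen 3072 -> 18432 base projection: lora_A.weight is [16, 3072], lora_B.weight is [18432, 16], so their product has the same shape as the guarded base_layer weight. A minimal shape check, using only the sizes and dtype taken from the guard dump (illustrative, not part of the traced model):

import torch

# Shapes from the TENSOR_MATCH guards: rank-16 LoRA around a 3072 -> 18432 Linear.
base_w = torch.zeros(18432, 3072, dtype=torch.bfloat16)
lora_a = torch.zeros(16, 3072, dtype=torch.bfloat16)
lora_b = torch.zeros(18432, 16, dtype=torch.bfloat16)

delta_w = lora_b @ lora_a            # [18432, 16] @ [16, 3072] -> [18432, 3072]
assert delta_w.shape == base_w.shape  # low-rank update matches the base weight shape
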
| +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
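
Taken together, the guards on base_layer, lora_dropout, lora_A, lora_B, scaling, use_dora and _active_adapter specialize the LoRA Linear forward that the guard comments quote from peft/tuners/lora/layer.py. A sketch of that path, reconstructed from those quoted lines (names follow the comments; this is an outline, not the verbatim peft implementation):

def lora_linear_forward(layer, x, *args, **kwargs):
    # layer is assumed to be a peft LoRA Linear wrapper as described by the guards above.
    if layer.disable_adapters:                       # ID_MATCH: _disable_adapters is False here
        return layer.base_layer(x, *args, **kwargs)
    result = layer.base_layer(x, *args, **kwargs)    # frozen bfloat16 base weight (TENSOR_MATCH)
    for active_adapter in layer.active_adapters:     # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        if active_adapter not in layer.lora_A.keys():
            continue
        lora_A = layer.lora_A[active_adapter]
        lora_B = layer.lora_B[active_adapter]
        dropout = layer.lora_dropout[active_adapter]
        scaling = layer.scaling[active_adapter]      # EQUALS_MATCH: scaling['default_0'] == 1.0
        x = x.to(lora_A.weight.dtype)                # quoted at peft/tuners/lora/layer.py:506
        if not layer.use_dora[active_adapter]:       # ID_MATCH: use_dora['default_0'] is False
            result = result + lora_B(lora_A(dropout(x))) * scaling
    return result
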
[__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
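
The norm1 guards just completed (and the analogous norm1_context guards that follow) pin an AdaLayerNormZero-style block: SiLU feeding a LoRA-wrapped 3072 -> 18432 Linear, then a LayerNorm(3072, eps=1e-6) with no affine parameters whose output is modulated by chunks of the projected embedding. A sketch following the lines quoted from diffusers/src/diffusers/models/normalization.py; the 6-way chunk and the returned names are assumptions inferred from the 18432 = 6 * 3072 projection and the transformer_flux.py call site:

import torch.nn.functional as F

def ada_layer_norm_zero(x, emb, linear, norm):
    # linear: 3072 -> 18432 projection (LoRA-wrapped in the guarded model)
    # norm:   LayerNorm(3072, eps=1e-6, elementwise_affine=False) per the weight/bias None guards
    emb = linear(F.silu(emb))                                      # emb = self.linear(self.silu(emb))
    shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
    x = norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]    # quoted at normalization.py:139
    return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
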
| | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # 
nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward 
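
The guard subtree above specializes on the PEFT LoRA wrapper around transformer_blocks.3.norm1_context.linear: a frozen bf16 base Linear with weight [18432, 3072] and bias [18432], two loaded adapters ('default_0', 'default_1') with only 'default_0' active, rank-16 trainable lora_A ([16, 3072]) / lora_B ([18432, 16]) with bias None, scaling['default_0'] == 1.0, use_dora['default_0'] False, no merged adapters, plus an affine-free LayerNorm over 3072 features with eps 1e-06. Below is a minimal, self-contained sketch of the forward path these guards pin down, following the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509); the class name, the single-adapter setup, and the nn.Identity stand-in for a p=0 lora_dropout are illustrative assumptions, not the actual PEFT implementation.

import torch
import torch.nn as nn

# Sketch of the LoRA-wrapped Linear the guards above specialize on.
# Shapes, dtypes and flags mirror the TENSOR_MATCH / EQUALS_MATCH / ID_MATCH guards
# for norm1_context.linear; this only illustrates the quoted forward logic and is
# not the real peft.tuners.lora.layer.Linear class.
class LoraLinearSketch(nn.Module):
    def __init__(self, in_features=3072, out_features=18432, r=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
        self.base_layer.requires_grad_(False)  # base weight/bias are requires_grad=False in the guards
        # assumed p=0 dropout, hence nn.Identity as a stand-in
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
        self.lora_A = nn.ModuleDict(
            {"default_0": nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)}
        )  # lora_A weight [16, 3072], bias is None (ID_MATCH against None)
        self.lora_B = nn.ModuleDict(
            {"default_0": nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)}
        )  # lora_B weight [18432, 16]
        self.scaling = {"default_0": scaling}   # scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}    # use_dora['default_0'] is False
        self.active_adapters = ["default_0"]    # _active_adapter == ['default_0']
        self.disable_adapters = False           # layer.py:488 check, simplified here
        self.merged_adapters = []               # merged_adapters is empty (LENGTH_CHECK)

    def forward(self, x):
        if self.disable_adapters or self.merged_adapters:
            return self.base_layer(x)
        result = self.base_layer(x)                          # layer.py:497
        for active_adapter in self.active_adapters:          # layer.py:499
            if active_adapter not in self.lora_A.keys():     # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]              # layer.py:503
            dropout = self.lora_dropout[active_adapter]       # layer.py:504
            scaling = self.scaling[active_adapter]            # layer.py:505
            x = x.to(lora_A.weight.dtype)                     # layer.py:506
            if not self.use_dora[active_adapter]:             # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Calling this on a [batch, seq, 3072] bf16 input reproduces the shape flow the TENSOR_MATCH guards describe: 3072 -> 16 -> 18432 on the LoRA branch, added onto the 18432-wide base projection. Because each of these attributes (shapes, dtypes, dict lengths, adapter names, use_dora flags) is baked into the [0/2] guard tree, changing any of them, for example activating 'default_1', fails the guards and triggers a recompilation instead of reusing this graph.
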
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter 
not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter[0], 
accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 
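
The guard comments above quote the PEFT LoRA forward path line by line (peft/tuners/lora/layer.py:488-509): check disable_adapters, loop over active_adapters, look up lora_A / lora_B / lora_dropout / scaling, cast x to lora_A.weight.dtype, and, when use_dora is off, add lora_B(lora_A(dropout(x))) * scaling to the base projection's output. As a reading aid, here is a minimal sketch assembled only from those quoted lines; LoraLinearSketch and its attribute layout are illustrative, and the real peft.tuners.lora.layer.Linear has additional branches (merged adapters, DoRA, embeddings) that these guards also protect.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Illustrative stand-in for the lora.Linear wrapping to_q above (rank-16 adapter on a 3072-wide base).
    def __init__(self, base_layer: nn.Linear, r: int = 16):
        super().__init__()
        self.base_layer = base_layer  # frozen base projection; the guards pin its weight/bias tensors
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # stand-in for a p=0 dropout
        self.scaling = {"default_0": 1.0}        # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}     # ID_MATCH on use_dora['default_0']
        self._active_adapter = ["default_0"]     # LENGTH_CHECK == 1, EQUALS_MATCH == 'default_0'
        self._disable_adapters = False           # ID_MATCH: if self.disable_adapters: (layer.py:488)

    def forward(self, x, *args, **kwargs):
        result = self.base_layer(x, *args, **kwargs)             # layer.py:497
        if self._disable_adapters:                               # layer.py:488 (simplified placement)
            return result
        for active_adapter in self._active_adapter:              # layer.py:499
            if active_adapter not in self.lora_A.keys():         # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                 # layer.py:503
            dropout = self.lora_dropout[active_adapter]          # layer.py:504
            scaling = self.scaling[active_adapter]               # layer.py:505
            x = x.to(lora_A.weight.dtype)                        # layer.py:506
            if not self.use_dora[active_adapter]:                # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result
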
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
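
The TENSOR_MATCH entries above pin the base to_k projection's parameters down to class (Parameter), dispatch keys, dtype (torch.bfloat16), device (CUDA 0), requires_grad, size ([3072, 3072] and [3072]) and stride. A hedged, standalone sketch of what that buys at runtime, using a plain nn.Linear on CPU for brevity (the traced model keeps these tensors on CUDA): as long as the pinned properties hold, later calls reuse the cached graph; change any of them and the guard fails, forcing a recompile much like this frame has already seen (the [0/2] prefix marks a recompilation of frame 0).

import torch
import torch.nn as nn

lin = nn.Linear(3072, 3072).to(torch.bfloat16)  # stands in for to_k.base_layer

@torch.compile
def to_k(hidden_states):
    return lin(hidden_states)

x = torch.randn(1, 512, 3072, dtype=torch.bfloat16)
to_k(x)          # first call: trace, then install guards (incl. TENSOR_MATCH on lin.weight / lin.bias)
to_k(x)          # guards hold, the compiled graph is reused without re-tracing

lin.float()      # in-place dtype change: the pinned torch.bfloat16 no longer matches
to_k(x.float())  # TENSOR_MATCH fails -> Dynamo recompiles and installs a fresh guard set
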
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + 
lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
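
The to_q, to_k and to_v subtrees are near-identical because the attention processor calls the three projections back to back; the guard comments quote exactly those lines (diffusers attention_processor.py:1716-1718). A minimal, self-contained stand-in for that step is sketched below; AttnProjectionSketch is an illustrative name, and the real diffusers processor also handles head reshaping, RoPE and the attention computation itself.

import torch
import torch.nn as nn

class AttnProjectionSketch(nn.Module):
    # In the traced model each projection is a peft lora.Linear wrapping the frozen base nn.Linear,
    # which is why the same guard subtree repeats for to_q, to_k and to_v.
    def __init__(self, dim: int = 3072):
        super().__init__()
        self.to_q = nn.Linear(dim, dim)
        self.to_k = nn.Linear(dim, dim)
        self.to_v = nn.Linear(dim, dim)

    def forward(self, hidden_states: torch.Tensor):
        query = self.to_q(hidden_states)  # attention_processor.py:1716
        key = self.to_k(hidden_states)    # attention_processor.py:1717
        value = self.to_v(hidden_states)  # attention_processor.py:1718
        return query, key, value
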
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # 
peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
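The peft source lines quoted in the guard comments above (peft/tuners/lora/layer.py:488-509) outline the LoRA Linear forward path that these guards specialize on. A minimal Python sketch reconstructed from those quoted fragments follows; it is not the verbatim peft implementation (DoRA, merged-adapter and mixed-batch branches are omitted, the class name is invented, and nn.Identity stands in for the per-adapter dropout module):

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Reconstructed from the source comments embedded in the guards above;
    # assumes the non-DoRA, unmerged path with a single active adapter.
    def __init__(self, base_layer: nn.Linear, rank: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base_layer
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, rank, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(rank, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # stand-in dropout module
        self.scaling = {"default_0": scaling}
        self.use_dora = {"default_0": False}
        self.active_adapters = ["default_0"]
        self.disable_adapters = False

    def forward(self, x, *args, **kwargs):
        if self.disable_adapters:                                 # layer.py:488
            return self.base_layer(x, *args, **kwargs)
        result = self.base_layer(x, *args, **kwargs)              # layer.py:497
        for active_adapter in self.active_adapters:               # layer.py:499
            if active_adapter not in self.lora_A.keys():          # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                  # layer.py:503
            dropout = self.lora_dropout[active_adapter]           # layer.py:504
            scaling = self.scaling[active_adapter]                # layer.py:505
            x = x.to(lora_A.weight.dtype)                         # layer.py:506
            if not self.use_dora[active_adapter]:                 # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Every attribute this sketch touches (base_layer, the adapter-keyed lora_A/lora_B/lora_dropout containers, scaling, use_dora, the active adapter list, disable_adapters) appears above as its own GuardManager, which is why a single LoRA-wrapped projection contributes dozens of guards.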
| | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
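The TENSOR_MATCH guards in this stretch pin down the exact parameter layout of each wrapped projection: a frozen bfloat16 base weight of size [3072, 3072] (plus a [3072] bias) on CUDA device 0, and trainable rank-16 adapter weights of size [16, 3072] (lora_A) and [3072, 16] (lora_B), whose bias slots are ID_MATCH-guarded rather than tensor-matched, consistent with bias=False (the guarded object id is plausibly id(None)). A small sketch of parameters that would satisfy those shape/dtype/requires_grad checks, assuming a CUDA device as in the log:

import torch
import torch.nn as nn

hidden, rank = 3072, 16          # widths taken from the size=[...] fields above
device, dtype = "cuda", torch.bfloat16

base = nn.Linear(hidden, hidden, bias=True, device=device, dtype=dtype)
lora_A = nn.Linear(hidden, rank, bias=False, device=device, dtype=dtype)
lora_B = nn.Linear(rank, hidden, bias=False, device=device, dtype=dtype)

base.requires_grad_(False)       # requires_grad=False in the base weight/bias guards
lora_A.requires_grad_(True)      # requires_grad=True in the adapter weight guards
lora_B.requires_grad_(True)

assert tuple(base.weight.shape) == (hidden, hidden) and tuple(base.bias.shape) == (hidden,)
assert tuple(lora_A.weight.shape) == (rank, hidden) and lora_A.bias is None
assert tuple(lora_B.weight.shape) == (hidden, rank) and lora_B.bias is None

Because TENSOR_MATCH also records dtype, device and stride, moving the model, casting it, or swapping in an adapter of a different rank would fail these guards and force a recompile.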
+- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
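Stripped of the GuardManager bookkeeping, the checks accumulated for a single wrapper such as add_q_proj amount to a handful of plain-Python predicates that Dynamo re-evaluates before reusing the compiled graph; if any of them fails (a different active adapter, a newly registered hook, a merged adapter, a patched forward), the frame is recompiled. The following is a rough paraphrase of the guards in this stretch, where module stands for the add_q_proj LoRA wrapper (the function is a hypothetical helper, not Dynamo's guard code, and the constants 7629920 and 7580768 seen above are plausibly id(False) and id(None)):

def add_q_proj_guards_still_hold(module) -> bool:
    # Paraphrase of the TYPE/EQUALS/DICT/ID guards listed for
    # ...['attn']._modules['add_q_proj'] above.
    return (
        "forward" not in module.__dict__                       # no monkey-patched forward
        and len(module.scaling) == 2                           # two adapters loaded...
        and module.scaling["default_0"] == 1.0                 # ...unit scaling on the active one
        and len(module._modules) == 6                          # base_layer, lora_A/B, dropout, embedding_A/B
        and list(module.lora_A.keys()) == ["default_0", "default_1"]
        and module._active_adapter == ["default_0"]            # LENGTH_CHECK + EQUALS_MATCH
        and not module.merged_adapters                         # nothing merged into the base weight
        and module._disable_adapters is False                  # ID_MATCH against (plausibly) id(False)
        and module.use_dora["default_0"] is False
        and not module._backward_hooks
        and not module._backward_pre_hooks
    )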
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'], 
244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], 
stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['attn'].processor, 139846063046176) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: 
GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_hooks # ff_output = 
self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._modules['ff_context']._backward_pre_hooks # 
context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['3']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['3']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=4 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[4] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[4] == '4' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
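
The guards above pin `norm1.norm` to a very specific LayerNorm configuration: `normalized_shape` is a 1-tuple equal to (3072,), `eps == 1e-06`, and both entries of `_parameters` are ID-matched to a fixed object, which is consistent with an affine-free LayerNorm whose weight and bias are None (an assumption; the log only records the object id). A minimal Python sketch of the `F.layer_norm` call quoted at nn/modules/normalization.py:217 under exactly those guarded values, with hypothetical batch and sequence sizes:

import torch
import torch.nn.functional as F

# Sketch of the F.layer_norm call quoted at nn/modules/normalization.py:217
# under the guarded values above. Batch and sequence sizes are hypothetical;
# passing weight=None / bias=None assumes the ID-matched parameters are None.
hidden_states = torch.randn(2, 512, 3072)
out = F.layer_norm(
    hidden_states,
    normalized_shape=(3072,),   # LENGTH_CHECK == 1, normalized_shape[0] == 3072
    weight=None,                # ID_MATCH on _parameters['weight'] (assumed None)
    bias=None,                  # ID_MATCH on _parameters['bias'] (assumed None)
    eps=1e-06,                  # EQUALS_MATCH: eps == 1e-06
)
print(out.shape)                # torch.Size([2, 512, 3072])
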
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'], 244529984) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
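
The TENSOR_MATCH guards just above fix the `norm1_context.linear` base layer to a bfloat16 Linear with weight [18432, 3072] and bias [18432], i.e. a 3072 -> 6*3072 projection. Below is a minimal sketch of the adaLN-Zero modulation path those guards feed, paraphrasing the diffusers lines quoted in the guard comments (normalization.py:137 and :139, transformer_flux.py:165/167). The 6-way chunk and the ordering of its pieces are inferences from the weight shape and the names unpacked in those quoted lines, not something the log states directly; sizes other than 3072 are hypothetical.

import torch
import torch.nn as nn

dim = 3072  # guarded hidden size

# Sketch of "emb = self.linear(self.silu(emb))" (normalization.py:137) and
# "x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]" (:139).
# The 6-way chunk is inferred from the 18432 x 3072 base-layer weight
# (18432 = 6 * 3072); the chunk ordering below is an assumption.
silu = nn.SiLU()
linear = nn.Linear(dim, 6 * dim)   # stands in for the guarded LoRA base_layer
norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)

temb = torch.randn(2, dim)         # hypothetical conditioning embedding
x = torch.randn(2, 512, dim)       # hypothetical hidden states

emb = linear(silu(temb))
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
x_mod = norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
print(x_mod.shape)                 # torch.Size([2, 512, 3072])
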
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
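
Taken together, the guards on `norm1_context.linear` describe one concrete LoRA configuration: a single active adapter ['default_0'], `scaling['default_0'] == 1.0`, `use_dora['default_0']` false, adapters enabled, nothing merged, `lora_A` weight [16, 3072] and `lora_B` weight [18432, 16]. A minimal sketch of the unmerged LoRA forward those guards specialize on, paraphrasing the peft/tuners/lora/layer.py lines quoted in the guard comments (497-509); the identity dropout and the float32 CPU tensors are stand-in assumptions:

import torch
import torch.nn as nn

# Minimal sketch of the unmerged, non-DoRA LoRA path the guards above
# specialize on. Shapes come from the TENSOR_MATCH guards: base layer
# 3072 -> 18432, lora_A 3072 -> 16 (no bias), lora_B 16 -> 18432 (no bias).
base_layer = nn.Linear(3072, 18432)         # weight [18432, 3072], bias [18432]
lora_A = nn.Linear(3072, 16, bias=False)    # weight [16, 3072]
lora_B = nn.Linear(16, 18432, bias=False)   # weight [18432, 16]
dropout = nn.Identity()                     # guards only fix its type, not its behaviour
scaling = 1.0                               # EQUALS_MATCH: scaling['default_0'] == 1.0

x = torch.randn(2, 3072)                    # hypothetical input
result = base_layer(x)                      # "result = self.base_layer(x, *args, **kwargs)"
x = x.to(lora_A.weight.dtype)               # "x = x.to(lora_A.weight.dtype)"
# adapters enabled, nothing merged, use_dora False for 'default_0':
result = result + lora_B(lora_A(dropout(x))) * scaling
print(result.shape)                         # torch.Size([2, 18432])
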
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 
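
For the `attn` module the guards record `heads == 24` alongside the quoted `head_dim = inner_dim // attn.heads` (attention_processor.py:1721). With the 3072-wide projections guarded elsewhere in this tree, that gives head_dim = 128, matching the [128] norm_q / norm_k weights guarded next. A small sketch of that head split, with hypothetical batch and sequence sizes and `inner_dim = 3072` taken as an inference rather than a logged fact:

import torch

# Sketch of the head split implied by `attn.heads == 24` and the quoted
# `head_dim = inner_dim // attn.heads`. Batch and sequence sizes are
# hypothetical; inner_dim = 3072 is inferred, giving head_dim = 128.
batch, seq = 2, 512
inner_dim, heads = 3072, 24
head_dim = inner_dim // heads                     # 128

query = torch.randn(batch, seq, inner_dim)        # stands in for attn.to_q(hidden_states)
query = query.view(batch, seq, heads, head_dim).transpose(1, 2)
print(query.shape)                                # torch.Size([2, 24, 512, 128])
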
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters) == 1 # if 
self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if 
active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in 
active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
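The guard entries above keep quoting the same few source lines (peft/tuners/lora/layer.py:488-509 and nn/modules/linear.py:125), which together are the PEFT LoRA linear forward that Dynamo is specializing on: result = base_layer(x) + lora_B(lora_A(dropout(x))) * scaling, gated on disable_adapters, active_adapters, and use_dora. Below is a minimal, self-contained sketch of that control flow reconstructed from those quoted lines; the class name LoraLinearSketch, the single 'default_0' adapter, and the constructor arguments are illustrative assumptions, not PEFT's actual API.

# Minimal sketch of the LoRA linear forward traced by the guards above.
# Reconstructed from the source lines quoted in the guard comments
# (peft/tuners/lora/layer.py:488-509, nn/modules/linear.py:125).
# Class name and constructor are illustrative assumptions, not PEFT's API.
import torch
import torch.nn as nn


class LoraLinearSketch(nn.Module):
    def __init__(self, base_layer: nn.Linear, r: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base_layer                      # frozen bf16 Linear in the log
        self.lora_A = nn.ModuleDict(                      # adapter keys are 'default_0', 'default_1' in the log
            {"default_0": nn.Linear(base_layer.in_features, r, bias=False)}
        )
        self.lora_B = nn.ModuleDict(
            {"default_0": nn.Linear(r, base_layer.out_features, bias=False)}
        )
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
        self.scaling = {"default_0": scaling}             # guarded via EQUALS_MATCH == 1.0
        self.use_dora = {"default_0": False}              # guarded via ID_MATCH on False
        self.active_adapters = ["default_0"]              # guarded via LENGTH_CHECK / EQUALS_MATCH
        self.disable_adapters = False                     # guarded via ID_MATCH

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.disable_adapters:                         # peft/tuners/lora/layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                       # layer.py:497 -> F.linear(input, weight, bias)
        for active_adapter in self.active_adapters:       # layer.py:499
            if active_adapter not in self.lora_A.keys():  # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]          # layer.py:503
            dropout = self.lora_dropout[active_adapter]   # layer.py:504
            scaling = self.scaling[active_adapter]        # layer.py:505
            x = x.to(lora_A.weight.dtype)                 # layer.py:506
            if not self.use_dora[active_adapter]:         # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Every attribute read in this path (module-dict lengths, adapter keys, scaling values, use_dora flags, hook dicts) surfaces in the tree as a DICT_LENGTH, EQUALS_MATCH, ID_MATCH, or TYPE_MATCH guard, which is why a single LoRA-wrapped projection such as add_k_proj or add_v_proj contributes dozens of entries.
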
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
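The TENSOR_MATCH guards in this stretch pin each parameter's class, dispatch keys, dtype (torch.bfloat16), device index, requires_grad, shape, and stride: the frozen 3072x3072 base_layer weights versus the trainable rank-16 lora_A/lora_B factors. A dump like this is normally obtained by enabling the Dynamo "guards" log artifact around torch.compile; the snippet below is a rough sketch under that assumption, using a toy CPU Linear and backend="eager" as stand-ins for the bf16 CUDA FLUX projections being compiled here.

# Sketch: reproducing a guard dump like the one above.
# Assumptions: toy CPU model and eager backend; the real run compiles the
# LoRA-wrapped FLUX transformer on CUDA with the default inductor backend.
import torch
import torch._logging
import torch.nn as nn

# Roughly equivalent to launching with TORCH_LOGS="guards,recompiles".
torch._logging.set_logs(guards=True, recompiles=True)

model = nn.Linear(3072, 3072).to(torch.bfloat16)   # toy stand-in for one projection
compiled = torch.compile(model, backend="eager")   # backend affects codegen, not guard installation

x = torch.randn(1, 512, 3072, dtype=torch.bfloat16)
compiled(x)  # first call traces the module, installs guards, and logs the TREE_GUARD_MANAGER
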
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B 
= self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['attn'].processor, 139846066996704) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
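Everything in this stretch of the guard tree comes from tracing the PEFT LoRA wrapper around the frozen Linear layers of the Flux transformer: the inline comments cite peft/tuners/lora/layer.py:488-509, and the TENSOR_MATCH entries in this part of the dump pin the concrete shapes (a frozen bf16 base weight of size [12288, 3072] with a [12288] bias for ff.net.0.proj, rank-16 lora_A weights of size [16, 3072], scaling['default_0'] == 1.0, a single active adapter 'default_0', no merged adapters, and the non-DoRA branch at layer.py:509 being the one that gets traced). The sketch below is a condensed paraphrase of that forward path, not the real peft implementation: the class and attribute names are illustrative, lora_dropout is assumed to be a no-op, lora_A is taken to be bias-free (peft's default; the excerpt only shows an ID_MATCH on its bias entry), and the [12288, 16] lora_B shape is inferred from the residual add rather than read off a guard in this excerpt.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Condensed paraphrase of the code path quoted in the guard comments above."""

    def __init__(self, in_features=3072, out_features=12288, rank=16):
        super().__init__()
        # Frozen bf16 base projection (TENSOR_MATCH: weight [12288, 3072], bias [12288],
        # requires_grad=False).
        self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
        self.base_layer.requires_grad_(False)
        # Trainable rank-16 adapter pair; lora_B's out_features is an inference here.
        self.lora_A = nn.ModuleDict(
            {"default_0": nn.Linear(in_features, rank, bias=False, dtype=torch.bfloat16)}
        )
        self.lora_B = nn.ModuleDict(
            {"default_0": nn.Linear(rank, out_features, bias=False, dtype=torch.bfloat16)}
        )
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # assumed no-op
        self.scaling = {"default_0": 1.0}      # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}   # layer.py:508 branch taken in the trace
        self._active_adapter = ["default_0"]   # LENGTH_CHECK == 1, element == 'default_0'
        self.merged_adapters = []              # LENGTH_CHECK: not merged_adapters
        self._disable_adapters = False         # layer.py:488 branch not taken

    def forward(self, x):
        result = self.base_layer(x)                                         # layer.py:497
        for active_adapter in self._active_adapter:                         # layer.py:499
            if active_adapter not in self.lora_A.keys():                    # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                            # layer.py:503
            dropout = self.lora_dropout[active_adapter]                     # layer.py:504
            scaling = self.scaling[active_adapter]                          # layer.py:505
            x = x.to(lora_A.weight.dtype)                                   # layer.py:506
            if not self.use_dora[active_adapter]:                           # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling      # layer.py:509
        return result

As a quick shape check, LoraLinearSketch()(torch.randn(1, 512, 3072, dtype=torch.bfloat16)) returns a [1, 512, 12288] tensor, matching the sizes the guards above pin down.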
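Because every attribute this forward path touches (adapter keys, scaling values, hook dicts, parameter shapes, dtypes and devices) is guarded, changing any of them, for example adding another adapter or switching the active one, fails the guard check and forces a recompile; that is why the dump repeats the same 'default_0'/'default_1' structure under every wrapped layer. Dumps of this kind are normally produced through Dynamo's "guards" logging artifact; a minimal sketch, assuming a recent PyTorch 2.x build (exact flags may vary by version):

import torch

# Enable the guard and recompile logging artifacts programmatically ...
torch._logging.set_logs(guards=True, recompiles=True)

# ... or via the environment before launching the process (script name illustrative):
#   TORCH_LOGS="guards,recompiles" python your_script.py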
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return 
iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: 
GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['4']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules) == 1 # for module in 
self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, 
self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, 
*args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
+- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['4']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['4']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=5 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[5] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[5] == '5' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, 
size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | 
| | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._active_adapter) == 1 # if 
isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
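Most of the non-tensor guards in this block reduce to cheap predicates over the wrapper's bookkeeping attributes: ID_MATCH compares object identity (here against the ids of False and None), EQUALS_MATCH compares concrete values such as 'default_0', TYPE_MATCH / LENGTH_CHECK / DICT_LENGTH pin container types and sizes, and the hook dictionaries are required to stay empty. A rough hand-written equivalent of the adapter-state guards above is sketched below for orientation; `linear` stands for the guarded transformer_blocks.5.norm1.linear wrapper, and the real checks are emitted by the guard manager, not written like this.

# Rough Python equivalent of the adapter-state guards above (illustration only).
def adapter_state_guards(linear) -> bool:
    return (
        linear._disable_adapters is False              # ID_MATCH on _disable_adapters
        and isinstance(linear._active_adapter, list)   # TYPE_MATCH
        and len(linear._active_adapter) == 1           # LENGTH_CHECK
        and linear._active_adapter[0] == "default_0"   # EQUALS_MATCH
        and not linear.merged_adapters                 # LENGTH_CHECK: no merged adapters
        and linear.use_dora["default_0"] is False      # ID_MATCH on use_dora['default_0']
        and not linear._parameters                     # DICT_LENGTH: wrapper itself holds no parameters
        and not linear._backward_hooks                 # DICT_LENGTH: no hooks installed
        and not linear._backward_pre_hooks
    )

If any of these attributes changes at runtime (for example a different adapter is activated or an adapter is merged), this guard set fails and Dynamo recompiles the frame.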
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 
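The norm1 / norm1_context guards reference diffusers' adaptive layer-norm step (normalization.py:135-139) and its call sites at transformer_flux.py:165/167: emb = self.linear(self.silu(emb)) followed by x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None], with a parameter-free LayerNorm over 3072 features (eps 1e-06, weight and bias guarded as None) and an 18432-wide projection, consistent with six 3072-wide modulation chunks; the ID_MATCH on .emb against the id of None corresponds to the skipped 'if self.emb is not None:' branch. A minimal sketch of that recipe follows; the class name, the chunk names and the 6-way split are inferred from the quoted lines rather than copied from diffusers.

import torch
import torch.nn as nn

# Sketch of the AdaLayerNormZero-style block the norm1 guards describe.
# Inferred reconstruction: the real diffusers class may differ in details.
class AdaLayerNormZeroSketch(nn.Module):
    def __init__(self, dim=3072):
        super().__init__()
        self.silu = nn.SiLU()                       # inplace guarded as False
        self.linear = nn.Linear(dim, 6 * dim)       # 6 * 3072 = 18432, matching the guarded weight shape
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)   # weight/bias are None in the guards

    def forward(self, x, emb):
        emb = self.linear(self.silu(emb))                                   # normalization.py:137
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]    # normalization.py:139
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp                  # unpacked at transformer_flux.py:165

norm1 = AdaLayerNormZeroSketch()
norm_h, gate_msa, shift_mlp, scale_mlp, gate_mlp = norm1(torch.randn(2, 64, 3072), torch.randn(2, 3072))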
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].eps, 
accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape[0], 
accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
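The norm1_context guards above follow the AdaLayerNormZero-style modulation quoted in the guard comments (diffusers/src/diffusers/models/normalization.py:137 and :139): SiLU followed by a Linear that produces shift/scale/gate chunks, then a LayerNorm with eps == 1e-06, normalized_shape (3072,), and weight/bias both None. The following is a minimal sketch of that pattern, assuming the usual chunk-into-six layout; the eps, hidden size, and the two quoted lines come from the log, while the class and argument names are illustrative rather than the diffusers source.

    import torch
    import torch.nn as nn

    class AdaLayerNormZeroSketch(nn.Module):
        # Hypothetical stand-in for the modulation path the norm1_context guards check.
        def __init__(self, dim: int = 3072):
            super().__init__()
            self.silu = nn.SiLU()
            self.linear = nn.Linear(dim, 6 * dim)
            # guards show the norm's weight/bias are None (no affine parameters)
            self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)

        def forward(self, x: torch.Tensor, emb: torch.Tensor):
            emb = self.linear(self.silu(emb))  # normalization.py:137 in the log
            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
            x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # normalization.py:139 in the log
            return x, gate_msa, shift_mlp, scale_mlp, gate_mlp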
[__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
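The norm_q / norm_k guards above pin an RMS-style normalization: eps == 1e-06, a single bfloat16 weight of size [128] (the per-head dimension, consistent with inner_dim 3072 and attn.heads == 24), and the `hidden_states * torch.rsqrt(variance + self.eps)` line quoted from normalization.py:428. Below is a minimal sketch of that computation; only the quoted lines and the guard-checked shape, dtype, and eps are taken from the log, and the mean-of-squares variance is an assumption.

    import torch
    import torch.nn as nn

    class RMSNormSketch(nn.Module):
        # Hypothetical stand-in for the query/key norm the norm_q / norm_k guards check.
        def __init__(self, dim: int = 128, eps: float = 1e-6):
            super().__init__()
            self.eps = eps
            # guard: bfloat16 weight of size [128], requires_grad=False
            self.weight = nn.Parameter(torch.ones(dim, dtype=torch.bfloat16), requires_grad=False)

        def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
            variance = hidden_states.float().pow(2).mean(-1, keepdim=True)    # assumed variance definition
            hidden_states = hidden_states * torch.rsqrt(variance + self.eps)  # normalization.py:428 in the log
            if self.weight is not None:                                       # normalization.py:430 in the log
                hidden_states = hidden_states.to(self.weight.dtype) * self.weight
            return hidden_states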
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) 
# lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
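The to_q guards above (and the to_k guards that begin here) trace the PEFT LoRA Linear forward path quoted in the comments (peft/tuners/lora/layer.py:488-509): disable_adapters and merged_adapters are falsy, the single active adapter is 'default_0' with scaling 1.0 and use_dora False, the frozen bfloat16 base layer is a 3072x3072 Linear with bias, and lora_A / lora_B are rank-16 projections. A second adapter 'default_1' exists in the dicts but is not active. The sketch below is a simplified, self-contained stand-in for that forward loop, following the quoted source lines; it models only the active adapter and is not the PEFT implementation itself.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Simplified stand-in for the peft LoRA Linear wrapper the to_q / to_k guards trace.
        def __init__(self, base_layer: nn.Linear, r: int = 16, scaling: float = 1.0):
            super().__init__()
            self.base_layer = base_layer                     # frozen 3072x3072 bf16 Linear in the log
            self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
            self.scaling = {"default_0": scaling}            # guard: scaling['default_0'] == 1.0
            self.use_dora = {"default_0": False}             # guard: use_dora['default_0'] is False
            self.active_adapters = ["default_0"]             # guard: _active_adapter == ['default_0']
            self.disable_adapters = False                    # guard: _disable_adapters is False
            self.merged_adapters = []                        # guard: merged_adapters is empty

        def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
            result = self.base_layer(x, *args, **kwargs)         # layer.py:497 in the log
            for active_adapter in self.active_adapters:          # layer.py:499
                if active_adapter not in self.lora_A.keys():     # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]             # layer.py:503
                dropout = self.lora_dropout[active_adapter]      # layer.py:504
                scaling = self.scaling[active_adapter]           # layer.py:505
                x = x.to(lora_A.weight.dtype)                    # layer.py:506
                if not self.use_dora[active_adapter]:            # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result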
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, 
**kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'], 244529984) # 
encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # 
result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return 
bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['attn'].processor, 139846066998768) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._parameters # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].eps == 1e-06 # return 
F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return 
iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = 
module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].__dict__) # 
norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | 
+- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # 
diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- 
ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['5']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['5']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['5']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=6 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[6] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[6] == '6' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
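(Editor's note, illustrative only.) Most of the guards in this stretch specialize on the PEFT LoRA wrapper around norm1's linear: the adapter keys 'default_0'/'default_1', scaling, use_dora, merged_adapters, the active adapter list, the frozen base_layer weight/bias tensors, and the trainable lora_A/lora_B weights. The sketch below condenses the forward path that the guard comments quote (peft/tuners/lora/layer.py lines 488-509) into a standalone module. It is a reconstruction for illustration, not the actual peft.tuners.lora.Linear class; the dimensions (3072 -> 18432, rank 16) are taken from the TENSOR_MATCH guards above, and nn.Identity stands in for the zero-probability lora_dropout.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Condensed sketch of the guarded LoRA forward path; names and line
    references follow the code lines quoted in the guard comments."""

    def __init__(self, in_features=3072, out_features=18432, r=16):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, bias=True)
        # One entry per adapter slot, keyed by adapter name as in the guards.
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # assumed p=0 dropout
        self.scaling = {"default_0": 1.0}          # guard: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}       # guard: use_dora['default_0'] is False
        self._active_adapter = ["default_0"]       # guard: _active_adapter == ['default_0']
        self._disable_adapters = False             # guard: disable_adapters is False

    def forward(self, x):
        if self._disable_adapters:                        # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                       # layer.py:497
        for active_adapter in self._active_adapter:       # layer.py:499
            if active_adapter not in self.lora_A.keys():  # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]          # layer.py:503
            dropout = self.lora_dropout[active_adapter]   # layer.py:504
            scaling = self.scaling[active_adapter]        # layer.py:505
            x = x.to(lora_A.weight.dtype)                 # layer.py:506
            if not self.use_dora[active_adapter]:         # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Because values such as scaling['default_0'] and the active adapter list are checked by EQUALS_MATCH/ID_MATCH guards, changing them between calls would invalidate this compiled frame and trigger a recompile.
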
[__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # 
nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward 
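Aside: the guard entries above keep citing the same handful of source lines — peft/tuners/lora/layer.py:488-509 for the LoRA-wrapped norm1_context.linear, and diffusers/src/diffusers/models/normalization.py:137-139 for the modulation around it. As a reading aid, here is a minimal, self-contained Python sketch of the computation those guards protect. It is a reconstruction from the quoted lines only, not the peft/diffusers implementations; the class names (LoRALinearSketch, AdaNormContextSketch) are hypothetical, the single 'default_0' adapter mirrors the guarded keys, and the 6-way chunk is inferred from the guarded lora_B out_features 18432 = 6 x 3072.

# Simplified, self-contained reconstruction of the code paths cited above.
# Hypothetical sketch -- not the peft / diffusers implementations.
import torch
import torch.nn as nn


class LoRALinearSketch(nn.Module):
    """LoRA-wrapped Linear, following peft/tuners/lora/layer.py:488-509 as quoted in the guards."""

    def __init__(self, base: nn.Linear, r: int = 16, scaling: float = 1.0, p: float = 0.0):
        super().__init__()
        self.base_layer = base
        # Single adapter, mirroring the guarded 'default_0' keys; rank 16 matches the
        # guarded lora_A weight [16, 3072] and lora_B weight [18432, 16].
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Dropout(p) if p > 0 else nn.Identity()})
        self.scaling = {"default_0": scaling}          # guarded: scaling['default_0'] == 1.0 on to_q
        self.use_dora = {"default_0": False}
        self.active_adapters = ["default_0"]           # guarded: _active_adapter[0] == 'default_0'
        self.disable_adapters = False

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = self.base_layer(x)                                          # layer.py:497
        if self.disable_adapters:                                            # layer.py:488
            return result
        for name in self.active_adapters:                                    # layer.py:499
            lora_A, lora_B = self.lora_A[name], self.lora_B[name]            # layer.py:500-503
            dropout, scaling = self.lora_dropout[name], self.scaling[name]   # layer.py:504-505
            x = x.to(lora_A.weight.dtype)                                    # layer.py:506
            if not self.use_dora[name]:                                      # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling       # layer.py:509
        return result


class AdaNormContextSketch(nn.Module):
    """The norm1_context pattern from diffusers normalization.py:137-139, as quoted above."""

    def __init__(self, dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = LoRALinearSketch(nn.Linear(dim, 6 * dim))  # 6 * 3072 = 18432, the guarded out_features
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # eps and (3072,) shape guarded above

    def forward(self, x: torch.Tensor, emb: torch.Tensor):
        emb = self.linear(self.silu(emb))                                   # normalization.py:137
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]    # normalization.py:139
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp                  # five outputs, transformer_flux.py:167

A throwaway call like AdaNormContextSketch()(torch.randn(2, 77, 3072), torch.randn(2, 3072)) runs and mirrors the call chain the quoted lines describe. Fixing use_dora and disable_adapters to False in the sketch is consistent with the guards above pinning those attributes to the objects seen at trace time and with the trace reaching the plain lora_B(lora_A(dropout(x))) * scaling branch at layer.py:509.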
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter 
not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter[0], 
accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + 
lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # 
peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'], 
244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], 
stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['attn'].processor, 139846062908992) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: 
GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_hooks # ff_output = 
self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._modules['ff_context']._backward_pre_hooks # 
context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['6']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['6']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=7 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[7] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[7] == '7' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'], 244529984) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters) == 1 # if 
self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if 
active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in 
active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
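The guards above, and the matching ones that follow for add_q_proj and to_out, all trace the attribute reads made by PEFT's LoRA Linear wrapper around each projection. As a point of reference, here is a minimal sketch of that forward path, reconstructed only from the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509) and from the guarded values (r=16, scaling 1.0, bfloat16 3072x3072 base weights, active adapter 'default_0'). It is illustrative rather than the actual peft implementation: it keeps a single adapter even though the guards show two ('default_0' and 'default_1', with only 'default_0' active), and the dropout module is assumed to be nn.Identity.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Illustrative only: mirrors the control flow the guards above check,
        # not the real peft.tuners.lora.layer.Linear class.
        def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
            super().__init__()
            self.base_layer = base_layer                  # guarded: base_layer weight/bias TENSOR_MATCH
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # assumed Identity (no dropout)
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})   # weight [16, 3072]
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})  # weight [3072, 16]
            self.scaling = {adapter: 1.0}                 # guarded: EQUALS_MATCH scaling['default_0'] == 1.0
            self.use_dora = {adapter: False}              # guarded: ID_MATCH on use_dora['default_0']
            self.disable_adapters = False                 # guarded: ID_MATCH on _disable_adapters
            self.merged_adapters = []                     # guarded: LENGTH_CHECK not merged_adapters
            self.active_adapters = [adapter]              # guarded: _active_adapter == ['default_0']

        def forward(self, x, *args, **kwargs):
            if self.disable_adapters:                     # layer.py:488
                return self.base_layer(x, *args, **kwargs)
            result = self.base_layer(x, *args, **kwargs)  # layer.py:497
            for active_adapter in self.active_adapters:   # layer.py:499
                if active_adapter not in self.lora_A.keys():  # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]      # layer.py:503
                dropout = self.lora_dropout[active_adapter]  # layer.py:504
                scaling = self.scaling[active_adapter]    # layer.py:505
                x = x.to(lora_A.weight.dtype)             # layer.py:506
                if not self.use_dora[active_adapter]:     # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result

Under torch.compile, each attribute access in this path surfaces as one of the TYPE_MATCH / DICT_LENGTH / EQUALS_MATCH / TENSOR_MATCH entries in the tree, which is why the guard set grows with every LoRA-wrapped projection in the transformer block.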
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
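For the TENSOR_MATCH entries themselves, the guard pins tensor metadata rather than values. Below is a rough eager-mode approximation of what the check_tensor call above asserts for the add_q_proj base-layer weight; the function name and structure are illustrative only (Dynamo evaluates these guards natively, not through Python code like this), with the expected metadata copied from the guard string.

    import torch
    from torch.nn import Parameter

    def tensor_match_sketch(t: torch.Tensor) -> bool:
        # Approximates the metadata pinned by:
        #   TENSOR_MATCH: check_tensor(..., Parameter, DispatchKeySet(CUDA, ...),
        #                 torch.bfloat16, device=0, requires_grad=False,
        #                 size=[3072, 3072], stride=[3072, 1])
        return (
            type(t) is Parameter
            and t.dtype is torch.bfloat16
            and t.device == torch.device("cuda", 0)
            and t.requires_grad is False
            and tuple(t.shape) == (3072, 3072)
            and tuple(t.stride()) == (3072, 1)
        )

Changing any of these properties, for example loading the adapters in a different dtype or moving the module to another device, would fail the corresponding guard and force a recompile.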
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B 
= self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['attn'].processor, 139846062911056) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return 
iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: 
GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['7']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules) == 1 # for module in 
self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, 
self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, 
*args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
+- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['7']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['7']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=8 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[8] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[8] == '8' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, 
size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # 
peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | 
| | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._active_adapter) == 1 # if 
isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].eps, 
accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape[0], 
accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) 
# lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
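Taken together, the guards on to_k above (base_layer, lora_dropout, lora_A, lora_B, scaling, use_dora, _active_adapter, merged_adapters, _disable_adapters) trace the LoRA linear forward path quoted in the guard comments (peft/tuners/lora/layer.py:488-509). A simplified, functional sketch of that path, reconstructed from those quoted lines rather than copied from peft, with shapes and constants mirroring what the guards pin (3072-dim projections, rank-16 bias-free LoRA matrices, scaling of 1.0, use_dora False, single active adapter 'default_0'):

```python
import torch
import torch.nn as nn

def lora_linear_forward(base_layer, lora_A, lora_B, lora_dropout,
                        scaling, use_dora, active_adapters, x):
    # Simplified reconstruction of the guarded path; the line references are
    # the ones quoted in the guard comments above, not verbatim peft code.
    result = base_layer(x)                                   # layer.py:497
    for active_adapter in active_adapters:                   # layer.py:499
        if active_adapter not in lora_A.keys():              # layer.py:500
            continue
        A = lora_A[active_adapter]
        B = lora_B[active_adapter]                           # layer.py:503
        dropout = lora_dropout[active_adapter]               # layer.py:504
        scale = scaling[active_adapter]                      # layer.py:505
        x = x.to(A.weight.dtype)                             # layer.py:506
        if not use_dora[active_adapter]:                     # layer.py:508
            result = result + B(A(dropout(x))) * scale       # layer.py:509
    return result

# Example matching the guarded shapes for a to_k-style projection
# (dropout replaced by Identity for simplicity):
base = nn.Linear(3072, 3072, bias=True).to(torch.bfloat16)
A = {"default_0": nn.Linear(3072, 16, bias=False).to(torch.bfloat16)}
B = {"default_0": nn.Linear(16, 3072, bias=False).to(torch.bfloat16)}
out = lora_linear_forward(base, A, B,
                          {"default_0": nn.Identity()},
                          {"default_0": 1.0},
                          {"default_0": False},
                          ["default_0"],
                          torch.randn(2, 3072, dtype=torch.bfloat16))
```

Every attribute read on this path (each dict lookup, each weight, each flag) becomes one of the guard entries in this dump, which is why the tree below repeats near-identically for to_v and add_k_proj.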
| | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, 
**kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'], 244529984) # 
encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # 
result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
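The same guard pattern recurs for each projection feeding the attention block (to_k, to_v, and here add_k_proj, matching the attn.to_k / attn.to_v / attn.add_k_proj calls quoted from diffusers' attention_processor.py:1717, 1718 and 1736), and each copy re-checks the adapter bookkeeping from the quoted peft tuners_utils.py lines (:455 and :469). A minimal illustrative sketch of the state those checks pin down, assuming a single active adapter named 'default_0' as in the EQUALS_MATCH guards above (not peft's actual BaseTunerLayer):

```python
class AdapterStateSketch:
    """Illustrative only: the adapter state the guards assert --
    one active adapter, nothing merged, adapters enabled."""

    def __init__(self):
        self._active_adapter = ["default_0"]  # LENGTH_CHECK == 1, EQUALS_MATCH 'default_0'
        self.merged_adapters = []             # LENGTH_CHECK: empty list
        self._disable_adapters = False        # ID_MATCH against a fixed object (presumably False)

    @property
    def active_adapter(self):
        return self._active_adapter

    @property
    def active_adapters(self):
        # tuners_utils.py:469 -- a bare string is normalized to a one-element list
        if isinstance(self.active_adapter, str):
            return [self.active_adapter]
        return self.active_adapter

    @property
    def merged(self) -> bool:
        return bool(self.merged_adapters)     # tuners_utils.py:455

    @property
    def disable_adapters(self) -> bool:
        return self._disable_adapters         # checked at layer.py:488

assert AdapterStateSketch().active_adapters == ["default_0"]
```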
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return 
bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 
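For orientation: the guard entries above for `...attn.to_add_out` (a PEFT LoRA-wrapped Linear) all correspond to PEFT's LoRA forward path, whose relevant lines are quoted in the guard annotations (`peft/tuners/lora/layer.py:488-509`). Below is a minimal sketch of that path, reconstructed from those quoted fragments rather than copied from the installed PEFT version; the merged-adapter handling is elided and the comments only restate what the guards pin down.

```python
# Rough reconstruction of the guarded LoRA Linear forward (peft/tuners/lora/layer.py:488-509).
# Not the verbatim PEFT implementation; attribute names follow the guarded sources above.
import torch

def lora_linear_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
    if self.disable_adapters:                        # layer.py:488; ID_MATCH pins this flag
        return self.base_layer(x, *args, **kwargs)   # merged-adapter unmerge handling elided
    result = self.base_layer(x, *args, **kwargs)     # layer.py:497; plain nn.Linear, TENSOR_MATCH on weight/bias
    for active_adapter in self.active_adapters:      # layer.py:499; _active_adapter == ['default_0'] is guarded
        if active_adapter not in self.lora_A.keys(): # layer.py:500
            continue
        lora_A = self.lora_A[active_adapter]         # Linear(3072 -> 16); bias pinned by ID_MATCH (typically bias-free)
        lora_B = self.lora_B[active_adapter]         # Linear(16 -> 3072); bias pinned by ID_MATCH
        dropout = self.lora_dropout[active_adapter]  # layer.py:504
        scaling = self.scaling[active_adapter]       # layer.py:505; EQUALS_MATCH: scaling['default_0'] == 1.0
        x = x.to(lora_A.weight.dtype)                # layer.py:506; bfloat16 per the TENSOR_MATCH guards
        if not self.use_dora[active_adapter]:        # layer.py:508; ID_MATCH on use_dora['default_0']
            result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
    return result
```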
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
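The `norm_added_q` / `norm_added_k` entries guard diffusers' RMSNorm modules: `eps == 1e-06`, a single bfloat16 weight of shape `[128]`, and empty hook dicts. A minimal sketch of the forward the annotations point at (`diffusers/src/diffusers/models/normalization.py:428-430`), reconstructed from the quoted lines and simplified (the real module's dtype handling is omitted):

```python
# Sketch of an RMSNorm forward matching the lines quoted in the guards above.
import torch
from torch import nn

class RMSNormSketch(nn.Module):
    def __init__(self, dim: int = 128, eps: float = 1e-6):
        super().__init__()
        self.eps = eps                                # EQUALS_MATCH: eps == 1e-06
        self.weight = nn.Parameter(torch.ones(dim))   # TENSOR_MATCH: size [128]

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        variance = hidden_states.float().pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)   # normalization.py:428
        if self.weight is not None:                                        # normalization.py:430
            hidden_states = hidden_states.to(self.weight.dtype) * self.weight
        return hidden_states
```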
| | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['attn'].processor, 139846069991072) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._parameters # query = 
attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].eps == 1e-06 # return 
F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 
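`norm2` is guarded as a plain `nn.LayerNorm`: `eps == 1e-06`, `normalized_shape == (3072,)`, and `weight`/`bias` each pinned by ID_MATCH to one fixed object, most likely `None`, i.e. `elementwise_affine=False`. Under that assumption, the call site the annotations reference (`nn/modules/normalization.py:217-218`) reduces to roughly:

```python
# What the norm2 guards appear to pin down, assuming weight/bias are None
# (elementwise_affine=False); a sketch, not the exact module configuration.
import torch
import torch.nn.functional as F

norm2 = torch.nn.LayerNorm(3072, eps=1e-6, elementwise_affine=False)

def norm2_forward(x: torch.Tensor) -> torch.Tensor:
    # Equivalent to the guarded call:
    # F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)
    return F.layer_norm(x, norm2.normalized_shape, norm2.weight, norm2.bias, norm2.eps)
```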
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return 
iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
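The `ff` guards (here and continuing below for the remaining LoRA sub-modules of `proj`) describe diffusers' FeedForward block: `net` is a 3-element container whose first entry wraps a LoRA-adapted `proj` Linear with a `[12288, 3072]` base weight. A rough sketch of the path the annotations quote (`attention.py:1165-1166`, `activations.py:88`); the tanh-approximate GELU, the dropout entry, and the output projection are assumptions, not read off the guards:

```python
# Sketch of the FeedForward path referenced by the ff guards above and below.
import torch
from torch import nn

class GELUProjSketch(nn.Module):
    def __init__(self, dim_in: int = 3072, dim_out: int = 12288):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)        # guarded base_layer weight: [12288, 3072]

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.proj(hidden_states)      # activations.py:88 in the annotations
        return nn.functional.gelu(hidden_states, approximate="tanh")  # assumed activation

class FeedForwardSketch(nn.Module):
    def __init__(self, dim: int = 3072, inner_dim: int = 12288):
        super().__init__()
        self.net = nn.ModuleList([
            GELUProjSketch(dim, inner_dim),
            nn.Dropout(0.0),                          # assumed middle entry
            nn.Linear(inner_dim, dim),                # assumed output projection
        ])                                            # DICT_LENGTH guard: len(net) == 3

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        for module in self.net:                       # attention.py:1165
            hidden_states = module(hidden_states)     # attention.py:1166
        return hidden_states
```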
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = 
module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].__dict__) # 
norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | 
+- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # 
diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 
accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- 
ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['8']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['8']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['8']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=9 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[9] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[9] == '9' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # 
peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # 
nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter 
not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter[0], 
accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + 
lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # 
peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # 
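The guard blocks above for add_v_proj, and the near-identical ones that follow for add_q_proj and to_out[0], all trace the same code path: the forward of a PEFT LoRA-wrapped Linear. The inline source comments point at peft/tuners/lora/layer.py lines 488-509 and nn/modules/linear.py line 125. As a reading aid, the sketch below paraphrases that control flow from the quoted lines only; it is a simplified reconstruction, not the installed peft implementation, and the class name LoraLinearSketch, the plain-dict attributes, and the single hard-coded adapter name "default_0" are illustrative stand-ins mirroring the EQUALS_MATCH guards in this dump.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    def __init__(self, base_layer: nn.Linear, r: int = 16, scale: float = 1.0):
        super().__init__()
        self.base_layer = base_layer                      # frozen 3072x3072 projection in the dump
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
        self.scaling = {"default_0": scale}               # guarded: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}              # guarded: use_dora['default_0'] is False
        self.active_adapters = ["default_0"]              # guarded: _active_adapter == ['default_0']
        self.disable_adapters = False                     # guarded via ID_MATCH on _disable_adapters
        self.merged = False                               # guarded: merged_adapters is empty

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.disable_adapters or self.merged:          # layer.py:488 / tuners_utils.py:455 in the log
            return self.base_layer(x)
        result = self.base_layer(x)                       # layer.py:497
        for active_adapter in self.active_adapters:       # layer.py:499
            lora_A = self.lora_A[active_adapter]          # layer.py:500
            lora_B = self.lora_B[active_adapter]          # layer.py:503
            dropout = self.lora_dropout[active_adapter]   # layer.py:504
            scaling = self.scaling[active_adapter]        # layer.py:505
            x_adapt = x.to(lora_A.weight.dtype)           # layer.py:506
            if not self.use_dora[active_adapter]:         # layer.py:508
                result = result + lora_B(lora_A(dropout(x_adapt))) * scaling  # layer.py:509
        return result

# Usage: shapes follow the dump; the base projection is frozen there (requires_grad=False).
base = nn.Linear(3072, 3072)
base.requires_grad_(False)
out = LoraLinearSketch(base)(torch.randn(2, 3072))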
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
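The TENSOR_MATCH entries above are the guards that actually pin tensor metadata: parameter class, dispatch keys, dtype (torch.bfloat16), device index, requires_grad, size and stride. For this layer they record a frozen 3072x3072 base projection plus trainable rank-16 LoRA factors ([16, 3072] for lora_A, [3072, 16] for lora_B). check_tensor itself is internal to torch._dynamo; the function below is only an illustrative stand-in for the properties it compares, and it runs on CPU so it stays self-contained (the dump has these parameters on CUDA device 0).

import torch

def tensor_guard_holds(t: torch.Tensor, *, dtype, device, requires_grad, size, stride) -> bool:
    # Illustrative stand-in for the metadata a TENSOR_MATCH guard records; not dynamo's API.
    return (
        t.dtype == dtype
        and t.device == torch.device(device)
        and t.requires_grad == requires_grad
        and tuple(t.size()) == tuple(size)
        and tuple(t.stride()) == tuple(stride)
    )

# Shapes copied from the guard dump: a rank-16 lora_A around a 3072 -> 3072 projection.
lora_A_weight = torch.zeros(16, 3072, dtype=torch.bfloat16, requires_grad=True)
assert tensor_guard_holds(
    lora_A_weight,
    dtype=torch.bfloat16, device="cpu", requires_grad=True,
    size=(16, 3072), stride=(3072, 1),
)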
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'], 
244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
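Two other recurring guard kinds here are cheap structural checks rather than tensor checks: DICT_CONTAINS verifies that no per-instance 'forward' override has been stuck onto a submodule's __dict__, and the hook guard managers (_forward_hooks, _forward_pre_hooks, _backward_hooks, _backward_pre_hooks) verify that the hook dictionaries are still empty. Monkey-patching forward on one of these submodules, or registering a hook after compilation, would fail these guards and force a recompile. A small illustration of both checks on a throwaway module:

import torch.nn as nn

mod = nn.Linear(8, 8)

# DICT_CONTAINS guard: is 'forward' overridden on the instance (rather than the class)?
assert "forward" not in mod.__dict__

# Hook guards: are the hook dicts still empty?
assert not mod._forward_hooks and not mod._forward_pre_hooks and not mod._backward_hooks

handle = mod.register_forward_hook(lambda m, inp, out: out)
assert len(mod._forward_hooks) == 1   # this is the kind of change that would break the guard
handle.remove()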
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
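Several of the dictionaries above are guarded on length 2 and on the key order ['default_0', 'default_1'] (scaling, lora_dropout, lora_A, lora_B, use_dora), while only 'default_0' is active with scaling 1.0. The [0/2] compile id in the log prefix indicates this frame has already been recompiled; one plausible cause is exactly this kind of adapter-state change, since loading, unloading, fusing or switching LoRA adapters alters those dict sizes, keys or values, the guards fail, and Dynamo recompiles. A minimal, hedged way to surface the same guard and recompile output for your own run is sketched below; it uses torch._logging.set_logs (equivalent to TORCH_LOGS="guards,recompiles" on recent torch releases) on a stand-in module rather than the full Flux transformer, and the first call needs a working torch.compile backend.

import torch
import torch.nn as nn

# Equivalent to running with TORCH_LOGS="guards,recompiles".
torch._logging.set_logs(guards=True, recompiles=True)

model = nn.Linear(3072, 3072)          # stand-in for the LoRA-wrapped transformer
compiled = torch.compile(model)
compiled(torch.randn(2, 3072))         # first call compiles and prints the guard tree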
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], 
stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # 
diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['attn'].processor, 139846069993136) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: 
GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ 
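
Annotation (not part of the captured log): the guard subtree above specializes on one PEFT LoRA-wrapped Linear, transformer_blocks.9.ff.net.2 — base weight [3072, 12288] bfloat16, lora_A [16, 12288], lora_B [3072, 16], scaling['default_0'] == 1.0, active adapter 'default_0'. The sketch below is a minimal, self-contained reconstruction of the forward path those guards trace, assembled from the source-line comments quoted in the guards (peft/tuners/lora/layer.py:488-509). It is an illustrative stand-in, not the verbatim peft implementation: the class name, the Identity dropout, and the plain-dict bookkeeping attributes are assumptions; shapes, dtype, and control flow follow the guard output. Under torch.compile, each Python attribute read in this forward becomes one of the guards shown (TYPE_MATCH / DICT_LENGTH / EQUALS_MATCH / ID_MATCH / TENSOR_MATCH), which is why changing the adapter list, scaling, or a weight's shape/dtype forces a recompile.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Illustrative stand-in for the guarded peft lora.Linear (names assumed)."""
    def __init__(self, in_features=12288, out_features=3072, r=16, scaling=1.0):
        super().__init__()
        # base weight [3072, 12288] + bias [3072], bfloat16 -- matches the TENSOR_MATCH guards
        self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
        # Identity stands in for the configured lora_dropout module
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
        # lora_A weight [16, 12288], lora_B weight [3072, 16]; LoRA A/B carry no bias
        # (the bias entries are guarded via ID_MATCH in the log, consistent with bias=None)
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)})
        self.scaling = {"default_0": scaling}      # guard: EQUALS_MATCH scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}       # guard: ID_MATCH on use_dora['default_0']
        self._active_adapter = ["default_0"]       # guard: LENGTH_CHECK == 1, EQUALS_MATCH 'default_0'
        self._disable_adapters = False             # guard: ID_MATCH on _disable_adapters
        self.merged_adapters = []                  # guard: TYPE_MATCH + LENGTH_CHECK (empty)

    def forward(self, x):
        if self._disable_adapters:                             # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                            # layer.py:497
        for active_adapter in self._active_adapter:            # layer.py:499
            if active_adapter not in self.lora_A.keys():       # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]               # layer.py:503
            dropout = self.lora_dropout[active_adapter]        # layer.py:504
            scaling = self.scaling[active_adapter]             # layer.py:505
            x = x.to(lora_A.weight.dtype)                      # layer.py:506
            if not self.use_dora[active_adapter]:              # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

# Usage: one small batch through the sketch. bfloat16 linear runs on CPU in recent
# PyTorch; move the module and input to CUDA if your build lacks bf16 CPU kernels.
m = LoraLinearSketch()
y = m(torch.randn(2, 12288, dtype=torch.bfloat16))
print(y.shape)  # torch.Size([2, 3072])
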
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_hooks # ff_output = 
self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._modules['ff_context']._backward_pre_hooks # 
context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['9']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['9']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=10 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[10] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[10] == '10' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, 
c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # 
return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], 
accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | 
| | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in 
self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # 
return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not 
L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
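
The TENSOR_MATCH entries above (for example on add_k_proj's base_layer weight and bias) pin each parameter's class, dispatch key set, dtype, device, requires_grad, size and stride via check_tensor. Dynamo evaluates these guards natively; the stand-in below is only an illustration of which printed properties participate, using the values from the [3072, 3072] bfloat16 weight guard above (the Parameter class and DispatchKeySet checks are deliberately omitted, and the device check is simplified to "CUDA tensor on index 0").

import torch

def tensor_guard_holds(t: torch.Tensor, *, dtype: torch.dtype, device_index: int,
                       requires_grad: bool, size: list, stride: list) -> bool:
    # Mirrors only the fields printed by check_tensor(...) in the guard dump; the real
    # guard additionally verifies the Parameter class and the DispatchKeySet.
    return (
        t.dtype == dtype
        and t.is_cuda
        and t.device.index == device_index
        and t.requires_grad == requires_grad
        and list(t.shape) == size
        and list(t.stride()) == stride
    )

# Values copied from the add_k_proj base_layer weight guard:
#   tensor_guard_holds(w, dtype=torch.bfloat16, device_index=0, requires_grad=False,
#                      size=[3072, 3072], stride=[3072, 1])
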
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, 
device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
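
Every adapter-related guard in this block (TYPE_MATCH on lora_A/lora_B/lora_dropout, EQUALS_MATCH on _active_adapter and scaling, ID_MATCH on use_dora and _disable_adapters, LENGTH_CHECK on merged_adapters) specializes the PEFT LoRA Linear forward path quoted in the inline comments (peft/tuners/lora/layer.py:488-509, peft/tuners/tuners_utils.py:455 and :469). A minimal sketch of that path, with attribute names taken from the guard sources rather than the verbatim PEFT implementation:

import torch

def lora_linear_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
    # Sketch of the code path referenced by the guards; not the actual PEFT source.
    if self.disable_adapters:                          # layer.py:488 (ID_MATCH on _disable_adapters)
        return self.base_layer(x, *args, **kwargs)
    if self.merged:                                    # tuners_utils.py:455 (merged_adapters is empty here)
        return self.base_layer(x, *args, **kwargs)

    result = self.base_layer(x, *args, **kwargs)       # layer.py:497 (TENSOR_MATCH on base_layer params)
    for active_adapter in self.active_adapters:        # layer.py:499 (_active_adapter == ['default_0'])
        if active_adapter not in self.lora_A.keys():   # layer.py:500 (key guards on the ModuleDict)
            continue
        lora_A = self.lora_A[active_adapter]
        lora_B = self.lora_B[active_adapter]           # layer.py:503
        dropout = self.lora_dropout[active_adapter]    # layer.py:504
        scaling = self.scaling[active_adapter]         # layer.py:505 (scaling['default_0'] == 1.0)
        x = x.to(lora_A.weight.dtype)                  # layer.py:506 (hence the bfloat16 TENSOR_MATCH)
        if not self.use_dora[active_adapter]:          # layer.py:508 (traced branch: plain LoRA, no DoRA)
            result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
    return result

Because every one of these attributes is read inside forward, each LoRA-wrapped projection (to_v, add_k_proj, add_v_proj, ...) contributes its own copy of essentially the same guard subtree, which is why the dump repeats near-identically per projection.
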
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
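
The [__guards] lines themselves come from the `guards` logging artifact. A small example of enabling the dump (paired with recompile reasons), assuming a PyTorch 2.x build that ships torch._logging.set_logs; the script and model names below are placeholders:

# Environment-variable form (placeholder script name):
#   TORCH_LOGS="guards,recompiles" python run_flux_lora.py
#
# Programmatic form:
import torch
import torch._logging

# Print the TREE_GUARD_MANAGER dump after each compilation, plus the reason for any
# recompile (e.g. a change in the LoRA adapter dicts tripping one of these guards).
torch._logging.set_logs(guards=True, recompiles=True)

# model = ...                      # placeholder: the transformer being compiled
# compiled = torch.compile(model)
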
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | 
| +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | 
| +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], 
accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 
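For orientation, the TENSOR_MATCH guards in the to_out[0] subtree above pin the exact parameter layout of the guarded layer: a frozen bfloat16 3072x3072 base Linear (weight size [3072, 3072], bias [3072], device=0, requires_grad=False) wrapped by rank-16 LoRA factors (lora_A weight [16, 3072], lora_B weight [3072, 16], requires_grad=True, bias guarded by ID_MATCH consistent with bias=None). A minimal, illustrative sketch of modules with matching shapes follows; the names here are hypothetical and not taken from the log:

import torch
import torch.nn as nn

hidden_dim, rank = 3072, 16  # sizes pinned by the TENSOR_MATCH guards above

# Frozen base projection: weight [3072, 3072], bias [3072], bfloat16, on CUDA (device=0 in the log)
base_layer = nn.Linear(hidden_dim, hidden_dim, bias=True, dtype=torch.bfloat16, device="cuda")
base_layer.requires_grad_(False)

# Trainable LoRA factors: A [16, 3072], B [3072, 16], both bias-free
lora_A = nn.Linear(hidden_dim, rank, bias=False, dtype=torch.bfloat16, device="cuda")
lora_B = nn.Linear(rank, hidden_dim, bias=False, dtype=torch.bfloat16, device="cuda")

# Changing any of these guarded properties (rank, dtype, device, strides, requires_grad)
# invalidates the corresponding TENSOR_MATCH guard and forces torch.compile to recompile.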
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
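Taken together, the guards on to_add_out (and on to_out[0] above) trace PEFT's LoRA Linear forward path: every plain-Python attribute Dynamo reads while compiling that path receives a guard. Below is a minimal sketch of that path, reconstructed from the peft/tuners/lora/layer.py source lines quoted in the guard comments (simplified: single adapter, no DoRA handling, no result-dtype restore; this is not PEFT's exact implementation):

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Illustrative stand-in for the guarded PEFT LoRA Linear, following the
    # source lines quoted in the guards (peft/tuners/lora/layer.py:488-509).
    def __init__(self, base_layer: nn.Linear, rank: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base_layer                                   # guarded via _modules['base_layer']
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, rank, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(rank, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})    # guard only checks its type
        self.scaling = {adapter: 1.0}                                  # EQUALS_MATCH ... == 1.0
        self.use_dora = {adapter: False}                               # ID_MATCH on use_dora['default_0']
        self.active_adapters = [adapter]                               # _active_adapter[0] == 'default_0'
        self.disable_adapters = False                                  # ID_MATCH on _disable_adapters
        self.merged_adapters = []                                      # LENGTH_CHECK: not merged_adapters

    def forward(self, x, *args, **kwargs):
        if self.disable_adapters:                                      # layer.py:488
            return self.base_layer(x, *args, **kwargs)
        result = self.base_layer(x, *args, **kwargs)                   # layer.py:497
        for active_adapter in self.active_adapters:                    # layer.py:499
            if active_adapter not in self.lora_A.keys():               # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                       # layer.py:503
            dropout = self.lora_dropout[active_adapter]                # layer.py:504
            scaling = self.scaling[active_adapter]                     # layer.py:505
            x = x.to(lora_A.weight.dtype)                              # layer.py:506
            if not self.use_dora[active_adapter]:                      # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling # layer.py:509
        return result

Every one of these attribute reads (dict lengths, the scaling float, the active-adapter list, the use_dora flag, the adapter keys 'default_0'/'default_1') is burned into the compiled graph as one of the guards above, which is why a pipeline with two loaded LoRA adapters produces such a deep guard tree for every transformer block.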
[0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not 
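
Editor's annotation (not part of the guard log): the norm_added_q / norm_added_k guards above pin eps == 1e-06 and a bfloat16 weight Parameter of size [128], and the guard comments quote the normalization update from diffusers/src/diffusers/models/normalization.py:428 and :430. The sketch below shows that computation in isolation; the mean-of-squares variance step and the tensor shapes are assumptions (a standard RMSNorm over a head dimension of 128), not something the log itself states.

import torch

def rms_norm(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # Assumed variance computation (standard RMSNorm); only the rsqrt update and the
    # weight check below are quoted verbatim in the guard comments (normalization.py:428, :430).
    variance = hidden_states.float().pow(2).mean(dim=-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)
    if weight is not None:
        hidden_states = hidden_states * weight
    return hidden_states.to(weight.dtype)

q = torch.randn(1, 24, 512, 128, dtype=torch.bfloat16)  # illustrative query projection, head_dim 128
w = torch.ones(128, dtype=torch.bfloat16)                # matches the guarded Parameter size [128]
print(rms_norm(q, w).shape)                              # torch.Size([1, 24, 512, 128])
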
None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['attn'].processor, 139846069233408) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, 
self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
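
Editor's annotation (not part of the guard log): the norm2 guards above pin eps == 1e-06 and normalized_shape == (3072,), and ID-match the weight and bias parameter entries, which is consistent with a non-affine LayerNorm where both are None (that reading of the ID_MATCH is an inference, not stated in the log). The specialized call is quoted in the guard comments (nn/modules/normalization.py:217-218); a minimal sketch, with an illustrative batch/sequence shape:

import torch
import torch.nn.functional as F

hidden_states = torch.randn(1, 512, 3072, dtype=torch.bfloat16)  # illustrative shape; last dim 3072 per the guard
out = F.layer_norm(hidden_states, (3072,), weight=None, bias=None, eps=1e-6)
print(out.shape)  # torch.Size([1, 512, 3072])
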
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'], 236152192) 
# for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 
96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
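
Editor's annotation (not part of the guard log): the TENSOR_MATCH and EQUALS_MATCH guards above fix the shapes, dtype, and scaling of the ff.net.0.proj LoRA pair: base_layer weight [12288, 3072] with bias [12288], lora_A['default_0'] weight [16, 3072], lora_B['default_0'] weight [12288, 16], all bfloat16, and scaling['default_0'] == 1.0. The sketch below reproduces the guarded update using the line quoted in the guard comments, result = result + lora_B(lora_A(dropout(x))) * scaling (peft/tuners/lora/layer.py:509); it runs on CPU, and nn.Identity stands in for the lora_dropout module, whose exact type is not readable from this excerpt.

import torch
import torch.nn as nn

torch.manual_seed(0)
base_layer = nn.Linear(3072, 12288, bias=True, dtype=torch.bfloat16)  # guarded weight [12288, 3072], bias [12288]
lora_A = nn.Linear(3072, 16, bias=False, dtype=torch.bfloat16)        # guarded lora_A['default_0'] weight [16, 3072]
lora_B = nn.Linear(16, 12288, bias=False, dtype=torch.bfloat16)       # guarded lora_B['default_0'] weight [12288, 16]
dropout = nn.Identity()   # stand-in for lora_dropout['default_0']
scaling = 1.0             # EQUALS_MATCH: scaling['default_0'] == 1.0

x = torch.randn(2, 3072, dtype=torch.bfloat16)
result = base_layer(x)                                   # peft/tuners/lora/layer.py:497
result = result + lora_B(lora_A(dropout(x))) * scaling   # peft/tuners/lora/layer.py:509
print(result.shape)                                      # torch.Size([2, 12288])
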
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = 
module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- 
EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- 
ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['10']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['10']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['10']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=11 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[11] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[11] == '11' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | 
| +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- 
DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # 
diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, 
size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward 
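[editorial sketch] The guards in this stretch walk the PEFT LoRA wrapper around attn.to_q field by field: the base_layer Linear and its bfloat16 weight/bias, the scaling dict (EQUALS_MATCH on scaling['default_0'] == 1.0), lora_dropout, and just below this point the lora_A / lora_B ModuleDicts, use_dora, _active_adapter and merged_adapters. The code path they protect is the one quoted in the trailing source comments (peft/tuners/lora/layer.py:488-509). The following is a minimal, self-contained sketch of that path reconstructed only from those quoted lines; the class name, constructor arguments and the single-adapter setup are illustrative assumptions, not peft's actual API:

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Simplified stand-in for a peft LoRA-wrapped Linear, reconstructed from the
    # source lines quoted in the guard comments above. Not the real peft class.
    def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base_layer
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}       # guarded above via EQUALS_MATCH (layer.py:505)
        self.use_dora = {adapter: False}    # guarded above via ID_MATCH on False (layer.py:508)
        self._active_adapter = [adapter]    # guarded via LENGTH_CHECK / EQUALS_MATCH
        self.merged_adapters = []           # guarded empty (tuners_utils.py:455)
        self._disable_adapters = False      # guarded via ID_MATCH (layer.py:488)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self._disable_adapters:                                # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                               # layer.py:497
        for active_adapter in self._active_adapter:               # layer.py:499
            if active_adapter not in self.lora_A.keys():          # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                  # layer.py:503
            dropout = self.lora_dropout[active_adapter]           # layer.py:504
            scaling = self.scaling[active_adapter]                # layer.py:505
            x = x.to(lora_A.weight.dtype)                         # layer.py:506
            if not self.use_dora[active_adapter]:                 # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Every dict lookup and boolean check in that forward is a Python-level decision made while Dynamo traces, which is why the guard tree pins the dict lengths, the key order of lora_A, and the concrete values of scaling, use_dora and _active_adapter: if any of them changes, the cached graph can no longer be reused.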
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # 
peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules) == 6 # result = 
self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward 
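[editorial sketch] The same guard pattern now repeats for attn.to_k (and, further down, to_v): EQUALS_MATCH on scaling['default_0'], ID_MATCH on use_dora['default_0'], DICT_LENGTH on the adapter dicts, and TENSOR_MATCH guards that pin dtype torch.bfloat16, device 0, requires_grad and the exact size/stride of each weight. Changing any of these values invalidates the compiled entry and forces a retrace. A small, self-contained illustration of that mechanism, not taken from this run (module and attribute names here are made up); recompiles can be observed with TORCH_LOGS=recompiles or torch._logging.set_logs(recompiles=True):

import torch
import torch.nn as nn

class Scaled(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(8, 8)
        self.scaling = {"default_0": 1.0}   # plain Python dict, like LoraLayer.scaling above

    def forward(self, x):
        # Reading a Python float at trace time makes Dynamo specialize on it,
        # installing an EQUALS_MATCH guard like the scaling guards in this log.
        return self.linear(x) * self.scaling["default_0"]

torch._logging.set_logs(recompiles=True)    # or run with TORCH_LOGS=recompiles
base = Scaled()
compiled = torch.compile(base)
x = torch.randn(2, 8)
compiled(x)                     # first call: trace, compile, install guards
compiled(x)                     # guards pass, cached graph is reused
base.scaling["default_0"] = 0.5
compiled(x)                     # EQUALS_MATCH guard fails, Dynamo retraces this frame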
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], 244529984) # 
value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
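The add_k_proj guards above (and the add_v_proj / add_q_proj blocks that follow) all come from attribute reads inside the PEFT LoRA linear forward that Dynamo traced; the trailing source comments point at peft/tuners/lora/layer.py:488-509 and nn/modules/linear.py:125. Below is a minimal sketch of that control flow, reconstructed only from those source-line comments and deliberately simplified (lora_linear_forward and layer are invented names, not the actual peft implementation), to show why each attribute read produces a guard:

import torch
import torch.nn.functional as F

def lora_linear_forward(layer, x):
    # "if self.disable_adapters:"            -> ID_MATCH on _disable_adapters
    #                                            (False, inferred from the traced branch)
    # "result = self.base_layer(x, ...)"     -> TENSOR_MATCH on the base weight/bias
    result = F.linear(x, layer.base_layer.weight, layer.base_layer.bias)
    # "for active_adapter in self.active_adapters:"
    #   -> TYPE_MATCH / LENGTH_CHECK / EQUALS_MATCH on _active_adapter == ['default_0']
    for name in layer._active_adapter:
        lora_A = layer.lora_A[name]        # TYPE_MATCH on the ModuleDict, DICT_LENGTH == 2
        lora_B = layer.lora_B[name]        # TENSOR_MATCH pins weight shape/dtype,
                                           # ID_MATCH on the (absent) bias entry
        dropout = layer.lora_dropout[name]
        scaling = layer.scaling[name]      # EQUALS_MATCH: scaling['default_0'] == 1.0
        x = x.to(lora_A.weight.dtype)      # "x = x.to(lora_A.weight.dtype)"
        if not layer.use_dora[name]:       # ID_MATCH: use_dora['default_0'] is falsy here
            result = result + lora_B(lora_A(dropout(x))) * scaling
    return result

Every attribute touched on this path becomes a guard, which is why a single LoRA-wrapped projection contributes dozens of entries to the tree.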
[__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
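The TENSOR_MATCH guards in this add_v_proj block pin the adapter geometry exactly: base weight size=[3072, 3072] with requires_grad=False, lora_A weight [16, 3072] and lora_B weight [3072, 16] with requires_grad=True, all bfloat16 on device 0, plus EQUALS_MATCH scaling['default_0'] == 1.0, i.e. a rank-16 LoRA on a 3072-wide projection. A tiny shape check (tensor names and the batch/sequence sizes below are invented for illustration; biases and dropout omitted):

import torch

hidden, rank, scaling = 3072, 16, 1.0
x = torch.randn(2, 512, hidden, dtype=torch.bfloat16)
w_base = torch.randn(hidden, hidden, dtype=torch.bfloat16)                # frozen base weight
a = torch.randn(rank, hidden, dtype=torch.bfloat16, requires_grad=True)   # lora_A.weight
b = torch.randn(hidden, rank, dtype=torch.bfloat16, requires_grad=True)   # lora_B.weight

# result = base_layer(x) + lora_B(lora_A(x)) * scaling
out = x @ w_base.t() + (x @ a.t()) @ b.t() * scaling
assert out.shape == (2, 512, hidden)

Changing any of these guarded tensor properties (shape, stride, dtype, device, requires_grad) on a later call fails the TENSOR_MATCH and triggers another recompile.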
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # 
peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].scaling['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].__dict__) # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'], 
accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['attn'].processor, 139846069235472) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = 
module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].__dict__) # 
norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = 
self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 
3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | 
| | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, 
device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = 
self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) 
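The run of guards above finishes the walk over transformer_blocks.11.ff_context.net.2, a peft lora.Linear wrapping the 12288→3072 feed-forward projection: TENSOR_MATCH pins the base weight to bfloat16 [3072, 12288] on device 0 and the default_0 adapter to rank 16 (lora_A [16, 12288], lora_B [3072, 16]), while EQUALS_MATCH/ID_MATCH pin scaling['default_0'] == 1.0, dropout p == 0.0, use_dora off, and the active-adapter list to ['default_0']. The source comments quoted in the guards (peft/tuners/lora/layer.py:488-509) name the code path Dynamo traced; the sketch below reconstructs that path with the shapes taken from these guards, while the class layout itself is an illustrative assumption, not the actual peft implementation.

```python
import torch
import torch.nn as nn

# Sketch of the LoRA linear forward traced by the guards above
# (shapes from the TENSOR_MATCH guards; attribute layout is illustrative only).
class LoraLinearSketch(nn.Module):
    def __init__(self, in_features=12288, out_features=3072, r=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Dropout(p=0.0)})  # p == 0.0, as guarded
        self.scaling = {"default_0": scaling}   # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}    # ID_MATCH on use_dora['default_0']
        self.active_adapters = ["default_0"]    # _active_adapter == ['default_0']

    def forward(self, x):
        result = self.base_layer(x)                       # layer.py:497
        for active_adapter in self.active_adapters:       # layer.py:499
            if active_adapter not in self.lora_A:         # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x = x.to(lora_A.weight.dtype)                 # layer.py:506
            if not self.use_dora[active_adapter]:         # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

# Usage check on the guarded shapes: output is [2, 3072].
y = LoraLinearSketch()(torch.randn(2, 12288, dtype=torch.bfloat16))
```

Every attribute read in that sketch (the per-adapter dict lookups, p, scaling, use_dora, the parameter shapes and dtypes) corresponds to one of the guards above; if any of them changes between calls, for example a different active adapter or a new scaling value, the guard set fails and the frame is recompiled.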
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._backward_hooks # encoder_hidden_states, hidden_states = block( # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['11']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['11']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=12 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[12] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[12] == '12' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].inplace, 
accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] 
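The same per-adapter pattern repeats for transformer_blocks.12.norm1.linear (a 3072→18432 projection carrying the same rank-16 default_0 adapter): adapter keys, key order, scaling, and parameter shapes are all guarded, and the [0/2] prefix on every record here, versus [0/0] earlier in the log, indicates this guard tree appears to belong to a later recompile of frame 0 rather than the initial compile. To reproduce a dump like this, or to see which specific guard forced a recompile, the logging knobs below are one way to do it; the nn.Linear stand-in is a placeholder, not the Flux transformer actually being compiled in this run.

```python
import torch
import torch._dynamo
import torch.nn as nn

# Print the guard tree for each compiled frame and the reason for every recompile.
# Same effect as setting TORCH_LOGS="guards,recompiles" in the environment before the run.
torch._logging.set_logs(guards=True, recompiles=True)

# Each distinct guard set (for example a different combination of active LoRA adapters)
# occupies one cache entry per frame; raise the limit if legitimate variants keep
# evicting each other, or lower it to fail fast while debugging.
torch._dynamo.config.cache_size_limit = 16

# Stand-in module; in the run that produced this log it would be the LoRA-patched
# Flux transformer whose guards are dumped above.
model = nn.Linear(8, 8)
compiled = torch.compile(model)
compiled(torch.randn(2, 8))  # the guard dump for this variant is emitted once it compiles
```

With recompiles enabled, Dynamo also reports which guard failed when a new variant is compiled (for example a changed scaling['default_0'] or a new adapter key), which is usually quicker to act on than reading a full tree like the one above.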
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
+- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
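The norm1_context guards above pin down an adaptive layer-norm block: SiLU followed by a (LoRA-wrapped) Linear produces the shift/scale/gate terms, and a LayerNorm over 3072 features with eps == 1e-06 and no affine parameters applies the modulation. A minimal sketch of that pattern, assuming the usual 6-way chunking of the projected embedding (the chunk count and the class name are assumptions, not taken from the log):

# Minimal sketch (reconstructed from the source lines quoted in the guards,
# not the actual diffusers code) of the guarded norm1_context pattern:
#   emb = self.linear(self.silu(emb))        # normalization.py:137
#   x = self.norm(x) * (1 + scale) + shift   # normalization.py:139
import torch
import torch.nn as nn

class AdaLayerNormZeroSketch(nn.Module):
    """Hypothetical stand-in for the guarded norm1_context module."""

    def __init__(self, embedding_dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()
        # Assumed 6-way split: shift/scale/gate for attention and for the MLP.
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        # weight/bias are guarded as None above, i.e. no affine parameters;
        # eps == 1e-06 and normalized_shape == (3072,) per the guards.
        self.norm = nn.LayerNorm(embedding_dim, eps=1e-6, elementwise_affine=False)

    def forward(self, x: torch.Tensor, emb: torch.Tensor):
        emb = self.linear(self.silu(emb))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        # Mirrors the 5-tuple unpacked at transformer_flux.py:167.
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp

# e.g. AdaLayerNormZeroSketch()(torch.randn(1, 512, 3072), torch.randn(1, 3072))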
[__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
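The norm_q and norm_k guards above describe the query/key RMSNorm: eps == 1e-06 and a bfloat16 weight of size [128], i.e. one scale per head channel (head_dim = 3072 // 24 = 128, matching the heads == 24 guard). A minimal sketch consistent with the quoted normalization.py lines; the class name and the float32 variance accumulation are assumptions:

# Minimal RMSNorm sketch matching the norm_q / norm_k guards above
# (eps == 1e-06, weight of size [128]).
import torch
import torch.nn as nn

class RMSNormSketch(nn.Module):
    def __init__(self, dim: int = 128, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))  # one scale per head_dim channel

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # variance over the last (head_dim) axis, computed in float32 for stability
        variance = hidden_states.float().pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)  # normalization.py:428
        if self.weight is not None:  # normalization.py:430
            hidden_states = hidden_states.to(self.weight.dtype) * self.weight
        return hidden_states

# e.g. applied per attention head: RMSNormSketch()(torch.randn(1, 24, 512, 128))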
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
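The to_q guards above (and the to_k guards that follow the same pattern) spell out the PEFT LoRA projection being compiled: a frozen 3072x3072 base Linear with bias, an active adapter 'default_0' of rank 16 with scaling 1.0, an installed but inactive 'default_1', and (further down) use_dora pinned to False. A minimal sketch of that forward path, reconstructed from the peft/tuners/lora/layer.py lines quoted in the guards; the class name and the Identity dropout stand-in are assumptions:

# Minimal sketch of the LoRA linear forward path referenced by the guards
# (peft/tuners/lora/layer.py:497-509). Rank 16 and scaling 1.0 come from the
# TENSOR_MATCH / EQUALS_MATCH guards; everything else is an assumed stand-in.
import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    def __init__(self, in_features: int = 3072, out_features: int = 3072,
                 r: int = 16, scaling: float = 1.0):
        super().__init__()
        # frozen base projection (weight [3072, 3072], bias [3072] in the guards)
        self.base_layer = nn.Linear(in_features, out_features, bias=True)
        # adapter 'default_0': lora_A weight [16, 3072], lora_B weight [3072, 16], no bias
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # stand-in
        self.scaling = {"default_0": scaling}
        self.active_adapters = ["default_0"]  # 'default_1' is installed but inactive

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = self.base_layer(x)                       # layer.py:497
        for active_adapter in self.active_adapters:       # layer.py:499
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]          # layer.py:503
            dropout = self.lora_dropout[active_adapter]   # layer.py:504
            scaling = self.scaling[active_adapter]        # layer.py:505
            x = x.to(lora_A.weight.dtype)                 # layer.py:506
            # use_dora['default_0'] is guarded False, so the plain additive path runs:
            result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

# e.g. LoraLinearSketch()(torch.randn(1, 512, 3072))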
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in 
active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 
in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].merged_adapters, 
accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].__dict__) # 
encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # 
diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['attn'].processor, 139846069036896) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
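Note on this stretch of the guard tree: the entries immediately above and below pin down the concrete configuration of transformer_blocks[12]'s norm2/ff path, namely a LayerNorm over 3072 features with eps == 1e-06 and no affine weight/bias (both _parameters are guarded as None via ID_MATCH), followed by a PEFT-LoRA-wrapped projection whose base weight is a bfloat16 [12288, 3072] Linear, with rank-16 lora_A/lora_B adapters, scaling['default_0'] == 1.0, a p == 0.0 dropout, and a tanh-approximate GELU (the adapter shapes and activation settings appear in the records just below this point). The following minimal stand-alone sketch reconstructs that computation from these guarded values, using the LoRA update quoted in the guard comments (result = base_layer(x) + lora_B(lora_A(dropout(x))) * scaling). The tensor contents, the batch/sequence sizes, and the purely functional rewrite are assumptions for illustration only; this is not the diffusers/peft implementation itself.

import torch
import torch.nn.functional as F

# Assumed input: the TENSOR_MATCH guards fix dtype/shape of the weights, not of
# hidden_states, so batch=1, seq=4096 here are made up for illustration.
hidden_states = torch.randn(1, 4096, 3072, dtype=torch.bfloat16)

# norm2: eps == 1e-06, normalized_shape == (3072,), weight/bias guarded as None.
norm_hidden_states = F.layer_norm(hidden_states, (3072,), weight=None, bias=None, eps=1e-06)

# ff.net[0].proj base layer: Linear weight [12288, 3072], bias [12288]
# (bfloat16, device=0 in the log), plus a rank-16 LoRA update with
# scaling['default_0'] == 1.0.
base_weight = torch.randn(12288, 3072, dtype=torch.bfloat16)
base_bias = torch.randn(12288, dtype=torch.bfloat16)
lora_A_weight = torch.randn(16, 3072, dtype=torch.bfloat16)   # size=[16, 3072] in the lora_A TENSOR_MATCH below
lora_B_weight = torch.randn(12288, 16, dtype=torch.bfloat16)  # size=[12288, 16] in the lora_B TENSOR_MATCH below
scaling = 1.0

# Per the guard comments: result = self.base_layer(x, *args, **kwargs), then
# result = result + lora_B(lora_A(dropout(x))) * scaling; dropout p == 0.0 is a no-op.
proj = F.linear(norm_hidden_states, base_weight, base_bias)
proj = proj + F.linear(F.linear(norm_hidden_states, lora_A_weight), lora_B_weight) * scaling

# ff.net[0] applies GELU with approximate == 'tanh'; ff.net[1] is Dropout(p=0.0);
# ff.net[2] is the mirror-image LoRA Linear back to 3072 features (guarded further below).
ff_hidden_states = F.gelu(proj, approximate="tanh")

A dump like this is typically obtained by enabling the Dynamo guard artifact logger before the torch.compile'd forward runs (for example TORCH_LOGS="guards", or torch._logging.set_logs(guards=True)). The guards recorded here are what will generally force a recompile if any of these assumptions change at call time, e.g. a LoRA adapter is merged, a scaling value changes, or a guarded weight's dtype, shape, or requires_grad flag flips.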
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
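
The TENSOR_MATCH guards in this stretch pin the LoRA factors of transformer_blocks.12.ff.net.2 to bfloat16 CUDA tensors of size [16, 12288] (lora_A, default_0) and [3072, 16] (lora_B, default_0), both with requires_grad=True, and the ID_MATCH on each bias is consistent with bias=None. The traced source the guards quote is peft's `result = result + lora_B(lora_A(dropout(x))) * scaling` (peft/tuners/lora/layer.py:509). Below is a minimal, illustrative sketch of that rank-16 update with exactly these shapes; it uses plain nn.Linear stand-ins, float32 on CPU instead of the guarded bfloat16 on CUDA, and assumes an identity lora_dropout and scaling of 1.0 (the guards pin the real values per layer).

```python
# Illustrative sketch (not the PEFT implementation) of the rank-16 LoRA update the guards
# above specialize on for transformer_blocks.12.ff.net.2:
#   result = result + lora_B(lora_A(dropout(x))) * scaling   # peft/tuners/lora/layer.py:509
# Shapes follow the TENSOR_MATCH guards: lora_A.weight [16, 12288], lora_B.weight [3072, 16],
# bias=None. float32/CPU is used here instead of the guarded bfloat16/CUDA for portability.
import torch
import torch.nn as nn

in_features, out_features, rank = 12288, 3072, 16

base_layer = nn.Linear(in_features, out_features)          # frozen base projection (ff.net.2)
lora_A = nn.Linear(in_features, rank, bias=False)          # guard: size=[16, 12288]
lora_B = nn.Linear(rank, out_features, bias=False)         # guard: size=[3072, 16]
dropout = nn.Identity()                                    # assumed no-op lora_dropout
scaling = 1.0                                              # assumed; pinned per adapter in the log

x = torch.randn(2, 512, in_features)
result = base_layer(x)                                     # layer.py:497
result = result + lora_B(lora_A(dropout(x))) * scaling     # layer.py:509
print(result.shape)                                        # torch.Size([2, 512, 3072])
```

The guards on the parameter dtypes, sizes and strides mean this compiled graph is only reused while the adapter weights keep exactly this layout.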
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
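
The guards just above are not about tensors but about adapter bookkeeping: _active_adapter must be a length-1 list equal to ['default_0'], merged_adapters must be empty, and the ID_MATCH checks on use_dora['default_0'] and _disable_adapters (quoted against `if not self.use_dora[active_adapter]:` and `if self.disable_adapters:`) appear to pin both flags to False, i.e. the plain-LoRA, adapters-enabled branch. The sketch below mirrors that control flow with an illustrative class (not the real PEFT layer); flipping any of these at runtime, for example activating default_1, merging an adapter, or registering a hook, should fail a guard and force a retrace.

```python
# Illustrative sketch of the adapter bookkeeping the guards above specialize on.
# Attribute names mirror the peft/tuners source lines quoted in the log; this is not
# the actual PEFT class, only the control flow the guards pin down.
from typing import Dict, List

class LoraStateSketch:
    def __init__(self) -> None:
        self._disable_adapters: bool = False                 # guard: ID_MATCH (adapters enabled)
        self._active_adapter: List[str] = ["default_0"]      # guard: len == 1, [0] == 'default_0'
        self.merged_adapters: List[str] = []                 # guard: empty -> self.merged is False
        # guard pins use_dora['default_0']; the value for 'default_1' is assumed here
        self.use_dora: Dict[str, bool] = {"default_0": False, "default_1": False}

    @property
    def merged(self) -> bool:
        return bool(self.merged_adapters)                    # tuners_utils.py:455

    @property
    def active_adapters(self) -> List[str]:
        if isinstance(self._active_adapter, str):            # tuners_utils.py:469
            return [self._active_adapter]
        return self._active_adapter

state = LoraStateSketch()
# The compiled path assumes: adapters enabled, nothing merged, no DoRA, exactly one
# active adapter named 'default_0'. Changing any of these breaks a guard.
assert not state._disable_adapters and not state.merged
assert state.active_adapters == ["default_0"]
assert not state.use_dora["default_0"]
```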
L['self']._modules['transformer_blocks']._modules['12']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules) == 1 # for 
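
For norm2_context the guards fix eps == 1e-06 and normalized_shape == (3072,), and the ID_MATCH on both entries of _parameters is consistent with weight=None and bias=None (an elementwise_affine=False LayerNorm). The quoted call site is `F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)` in nn/modules/normalization.py. A short sketch of that exact call, with the affine-free configuration as an inference rather than something stated outright in the log:

```python
# Sketch of the norm2_context call the guards above specialize on: LayerNorm over the last
# dimension of size 3072 with eps=1e-6 and, as the ID_MATCH guards on _parameters suggest,
# no learnable affine parameters. Mirrors nn/modules/normalization.py:217-218.
import torch
import torch.nn.functional as F

encoder_hidden_states = torch.randn(1, 512, 3072)
normalized_shape = (3072,)   # guard: len == 1 and [0] == 3072
eps = 1e-06                  # guard: eps == 1e-06

norm_encoder_hidden_states = F.layer_norm(
    encoder_hidden_states, normalized_shape, weight=None, bias=None, eps=eps
)
print(norm_encoder_hidden_states.shape)  # torch.Size([1, 512, 3072])
```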
module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
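
The guards then descend into ff_context: a FeedForward whose net has exactly three entries and is run as `for module in self.net: hidden_states = module(hidden_states)` (attention.py:1165-1166). Only net['0'] is pinned in this stretch: an activation wrapper that calls `self.proj(hidden_states)` (activations.py:88), where proj is the LoRA-wrapped linear with scaling['default_0'] == 1.0 and six submodules. The sketch below shows that shape of module, with the caveat that the gelu-tanh activation and the Dropout/Linear tail of net are assumptions based on diffusers' usual FeedForward layout, not something these guard lines state.

```python
# Rough sketch of the FeedForward structure the guards above walk through
# (LENGTH_CHECK: len(net) == 3, then net['0'].proj). The activation and net[1]/net[2]
# are assumptions; the log here only pins net['0'] and its proj.
import torch
import torch.nn as nn
import torch.nn.functional as F

class GELUProj(nn.Module):
    """Stand-in for diffusers' GELU activation wrapper (activations.py:88)."""
    def __init__(self, dim_in: int, dim_out: int) -> None:
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)   # in the log this is a LoRA-wrapped Linear
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.proj(hidden_states)
        return F.gelu(hidden_states, approximate="tanh")   # assumed gelu-tanh activation

dim, inner_dim = 3072, 12288
net = nn.ModuleList([GELUProj(dim, inner_dim), nn.Dropout(0.0), nn.Linear(inner_dim, dim)])

hidden_states = torch.randn(1, 512, dim)
for module in net:                        # attention.py:1165-1166, as quoted in the guards
    hidden_states = module(hidden_states)
print(hidden_states.shape)                # torch.Size([1, 512, 3072])
```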
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
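
Taken together, the guards on ff_context.net['0'].proj describe a frozen 3072 -> 12288 base Linear (weight [12288, 3072] and bias [12288], both bfloat16, requires_grad=False) plus trainable rank-16 adapters lora_A [16, 3072] and lora_B [12288, 16] (requires_grad=True), with scaling['default_0'] == 1.0. A compact sketch of that parameterization with illustrative stand-ins (float32/CPU again instead of bfloat16/CUDA):

```python
# Sketch of the guarded shapes for ff_context.net['0'].proj: frozen 3072 -> 12288 base
# Linear (weight [12288, 3072], bias [12288], requires_grad=False) plus trainable rank-16
# LoRA factors (lora_A [16, 3072], lora_B [12288, 16]). Illustrative only.
import torch
import torch.nn as nn

dim, inner_dim, rank = 3072, 12288, 16

base_layer = nn.Linear(dim, inner_dim)            # guard: weight [12288, 3072], bias [12288]
base_layer.requires_grad_(False)                  # guard: requires_grad=False on both
lora_A = nn.Linear(dim, rank, bias=False)         # guard: weight [16, 3072], requires_grad=True
lora_B = nn.Linear(rank, inner_dim, bias=False)   # guard: weight [12288, 16], requires_grad=True

x = torch.randn(1, 512, dim)
out = base_layer(x) + lora_B(lora_A(x)) * 1.0     # scaling['default_0'] == 1.0
print(out.shape)                                  # torch.Size([1, 512, 12288])

# Only the adapter factors carry gradients in this configuration:
modules = nn.ModuleDict({"base_layer": base_layer, "lora_A": lora_A, "lora_B": lora_B})
trainable = [n for n, p in modules.named_parameters() if p.requires_grad]
print(trainable)  # ['lora_A.weight', 'lora_B.weight']
```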
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return 
F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['12']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['12']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=13 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[13] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[13] == '13' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, 
device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._active_adapter) == 1 
# if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_hooks # 
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules) == 3 # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 
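Most of the guards above (and below, for attn.to_q and the other projections) protect the state read by PEFT's LoRA-wrapped Linear forward: base_layer, lora_A / lora_B / lora_dropout keyed by 'default_0', scaling, use_dora, the active/merged/disabled adapter flags, and the module hook dicts. As a reading aid, here is a minimal runnable toy that mirrors that path, reconstructed only from the source-line comments quoted in these guards (peft/tuners/lora/layer.py:497-509); it is not the real PEFT implementation, and the use of nn.Identity for the dropout slot and float32 weights are simplifications.

    import torch
    import torch.nn as nn

    # Toy LoRA-wrapped Linear mirroring the guarded path (rank 16, single
    # adapter 'default_0'). Reconstruction of the quoted source lines only.
    class ToyLoraLinear(nn.Module):
        def __init__(self, in_f, out_f, r=16, adapter="default_0"):
            super().__init__()
            self.base_layer = nn.Linear(in_f, out_f)
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_f, r, bias=False)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_f, bias=False)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
            self.scaling = {adapter: 1.0}        # scaling['default_0'] == 1.0 in the guards
            self.use_dora = {adapter: False}     # the non-DoRA branch is the one traced
            self.active_adapters = [adapter]     # _active_adapter == ['default_0']

        def forward(self, x):
            result = self.base_layer(x)                          # layer.py:497
            for active_adapter in self.active_adapters:          # layer.py:499
                if active_adapter not in self.lora_A.keys():     # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]             # layer.py:503
                dropout = self.lora_dropout[active_adapter]      # layer.py:504
                scaling = self.scaling[active_adapter]           # layer.py:505
                x = x.to(lora_A.weight.dtype)                    # layer.py:506
                if not self.use_dora[active_adapter]:            # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result

    layer = ToyLoraLinear(3072, 18432)   # shapes match the norm1_context.linear guards
    out = layer(torch.randn(2, 3072))

The guards pin exactly what this path reads at trace time: the 'default_0' adapter key, scaling == 1.0, the use_dora flag and the lora bias slots by object identity, the bfloat16 rank-16 weights (lora_A weight [16, 3072], lora_B weight [18432, 16] for this block's 3072 -> 18432 projection), and empty hook / merged_adapters containers.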
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
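The norm1_context guards above come from diffusers' adaptive-LayerNorm modulation: the LoRA-wrapped linear produces the modulation vector, and the affine-free LayerNorm (normalized_shape == (3072,), eps == 1e-06, weight and bias both pinned by object identity) is shifted and scaled with it, as quoted from normalization.py:137/139 and consumed at transformer_flux.py:167. A small runnable sketch assembled from those quoted lines; the 6-way chunk of emb is an assumption for illustration (it is at least consistent with the 18432-wide projection, i.e. 6 * 3072):

    import torch
    import torch.nn as nn

    # Assumed shapes from the guards: dim = 3072, 6 modulation chunks.
    dim = 3072
    silu, linear = nn.SiLU(), nn.Linear(dim, 6 * dim)
    norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # weight/bias are None, as guarded

    emb = torch.randn(2, dim)
    x = torch.randn(2, 10, dim)

    emb = linear(silu(emb))                                            # normalization.py:137
    shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)  # assumed split
    x = norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]        # normalization.py:139
    # transformer_flux.py:167 then unpacks the block's outputs as
    # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(...)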
[__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
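The attn guards reference the Flux attention processor (attention_processor.py:1716-1730): heads == 24, head_dim = inner_dim // attn.heads (3072 // 24 == 128, matching the guarded [128] bfloat16 norm_q / norm_k weights), query = attn.to_q(hidden_states), and the optional RMS-style query/key normalization whose torch.rsqrt line is quoted from normalization.py:428-430. A minimal runnable sketch of that QK-normalization step, reconstructed from the quoted lines; the mean-of-squares variance and the [batch, heads, seq, head_dim] layout are assumptions for illustration:

    import torch

    heads, inner_dim = 24, 3072
    head_dim = inner_dim // heads      # attention_processor.py:1721 -> 128
    eps = 1e-6                         # norm_q.eps / norm_k.eps in the guards

    def rms_norm(hidden_states, weight=None):
        # normalization.py:428-430 as quoted in the guards; what `variance`
        # holds (mean of squares over the last dim) is an assumption here.
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + eps)   # normalization.py:428
        if weight is not None:                                        # normalization.py:430
            hidden_states = hidden_states * weight
        return hidden_states

    query = torch.randn(2, heads, 64, head_dim)   # assumed layout
    norm_q_weight = torch.ones(head_dim)          # guarded shape [128]
    query = rms_norm(query, norm_q_weight)        # "query = attn.norm_q(query)" (line 1728)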
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = 
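
Note on the guard sources: every leaf guard in the to_q / to_k / to_v subtrees carries a source comment pointing at the same PEFT code path, peft/tuners/lora/layer.py lines 488-509 (plus peft/tuners/tuners_utils.py lines 455 and 469 for the adapter bookkeeping). The sketch below reconstructs that control flow purely from the lines quoted in this log, as a reading aid; it is a simplified paraphrase, not the verbatim peft implementation.

    # Paraphrase of the LoRA Linear.forward path quoted in the guard comments
    # (peft/tuners/lora/layer.py:488-509); a reading aid, not the real implementation.
    def lora_linear_forward(layer, x):
        if layer.disable_adapters:                        # layer.py:488 (ID_MATCH on _disable_adapters)
            return layer.base_layer(x)
        result = layer.base_layer(x)                      # layer.py:497 (guards on base_layer._parameters)
        for active_adapter in layer.active_adapters:      # layer.py:499 (EQUALS_MATCH: _active_adapter[0] == 'default_0')
            if active_adapter not in layer.lora_A.keys(): # layer.py:500 (key guards on lora_A._modules)
                continue
            lora_A = layer.lora_A[active_adapter]
            lora_B = layer.lora_B[active_adapter]         # layer.py:503
            dropout = layer.lora_dropout[active_adapter]  # layer.py:504
            scaling = layer.scaling[active_adapter]       # layer.py:505 (EQUALS_MATCH: scaling['default_0'] == 1.0)
            x = x.to(lora_A.weight.dtype)                 # layer.py:506 (TENSOR_MATCH on lora_A weight)
            if not layer.use_dora[active_adapter]:        # layer.py:508 (ID_MATCH on use_dora['default_0'])
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Every attribute walked in that path (base_layer, lora_A, lora_B, lora_dropout, scaling, use_dora, the hook dicts) appears above as a GuardManager node with a matching accessor.
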
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
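
The TENSOR_MATCH guards above pin the full metadata of each parameter: the frozen base projection weight is a [3072, 3072] bfloat16 tensor on cuda:0 with requires_grad=False, while the trainable LoRA factors are [16, 3072] (lora_A) and [3072, 16] (lora_B), i.e. a rank-16 adapter around the 3072-wide linear layer. The snippet below is only a rough stand-in for the kind of metadata comparison this implies; it is not dynamo's check_tensor, and the tensors are created on CPU so the example runs anywhere, whereas the guarded parameters live on cuda:0.

    import torch

    def matches_guard(t, *, dtype, device, size, stride, requires_grad):
        # Rough stand-in for the metadata a TENSOR_MATCH guard pins down
        # (dtype, device, shape, stride, requires_grad); not dynamo's check_tensor.
        return (t.dtype == dtype
                and t.device == torch.device(device)
                and tuple(t.size()) == tuple(size)
                and tuple(t.stride()) == tuple(stride)
                and t.requires_grad == requires_grad)

    # Shapes copied from the guards above; tensors built on CPU only so the
    # snippet runs anywhere (the guarded parameters themselves are on cuda:0).
    base_w = torch.empty(3072, 3072, dtype=torch.bfloat16)                    # frozen base projection
    lora_a = torch.empty(16, 3072, dtype=torch.bfloat16, requires_grad=True)  # rank-16 down-projection
    lora_b = torch.empty(3072, 16, dtype=torch.bfloat16, requires_grad=True)  # rank-16 up-projection
    assert matches_guard(base_w, dtype=torch.bfloat16, device="cpu",
                         size=(3072, 3072), stride=(3072, 1), requires_grad=False)
    assert matches_guard(lora_a, dtype=torch.bfloat16, device="cpu",
                         size=(16, 3072), stride=(3072, 1), requires_grad=True)
    assert matches_guard(lora_b, dtype=torch.bfloat16, device="cpu",
                         size=(3072, 16), stride=(16, 1), requires_grad=True)
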
| | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in 
active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
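
Alongside the tensors, each LoRA wrapper is guarded on its adapter bookkeeping: the module dicts hold two adapter slots ('default_0' and 'default_1'), only 'default_0' is active, scaling['default_0'] == 1.0, merged_adapters is empty, and the use_dora / _disable_adapters flags are pinned by object identity. If any of that state changes at runtime (for example switching the active adapter, merging an adapter, or enabling DoRA), these guards fail and torch.compile recompiles the frame. The helper below is a hypothetical, simplified predicate over one LoRA-wrapped projection that mirrors those checks; the attribute names follow the peft sources quoted in the guard comments.

    def adapter_state_unchanged(layer):
        # Hypothetical predicate mirroring the adapter-state guards above for one
        # LoRA-wrapped projection; attribute names follow the peft sources quoted
        # in the guard comments (lora_A, active_adapters, merged_adapters, ...).
        return (list(layer.lora_A.keys()) == ["default_0", "default_1"]
                and list(layer.active_adapters) == ["default_0"]
                and not layer.merged_adapters
                and not layer.disable_adapters
                and layer.use_dora.get("default_0") is False
                and layer.scaling.get("default_0") == 1.0)
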
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
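The guard entries above repeatedly cite peft/tuners/lora/layer.py lines 488-509, i.e. the LoRA Linear forward that torch.compile is tracing through for add_q_proj, add_v_proj and to_out.0 of transformer_blocks.13.attn. The following is a minimal Python sketch of that code path, reconstructed only from the source lines and guard values quoted in the log (adapter key 'default_0' active, scaling 1.0, use_dora False, no merged adapters, rank-16 bfloat16 LoRA matrices around a frozen 3072x3072 base Linear). The class name, the rank argument and the nn.Identity dropout are illustrative assumptions, not the actual peft implementation, and the second registered adapter 'default_1' seen in the dict-length guards is omitted for brevity.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Illustrative stand-in for the peft LoRA Linear traced above (not the real peft class)."""
    def __init__(self, base: nn.Linear, rank: int = 16):
        super().__init__()
        self.base_layer = base                       # frozen 3072x3072 bfloat16 Linear in the log
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base.in_features, rank, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(rank, base.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # assumption: no dropout
        self.scaling = {"default_0": 1.0}            # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}         # ID_MATCH on use_dora['default_0']
        self._active_adapter = ["default_0"]         # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self.merged_adapters = []                    # LENGTH_CHECK: not merged_adapters
        self._disable_adapters = False               # ID_MATCH on _disable_adapters

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # mirrors the source lines quoted in the guard comments (layer.py:497-509)
        result = self.base_layer(x)
        for active_adapter in self._active_adapter:
            if active_adapter not in self.lora_A.keys():
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:
                result = result + lora_B(lora_A(dropout(x))) * scaling
        return result

# Configuration implied by the guards on add_q_proj / add_v_proj / to_out.0:
proj = LoraLinearSketch(nn.Linear(3072, 3072, bias=True)).to(torch.bfloat16)
proj.base_layer.requires_grad_(False)  # base weights carry requires_grad=False in the TENSOR_MATCH above

Each guarded value in the sketch corresponds to a concrete check in the dump: TENSOR_MATCH pins dtype, device, size, stride and requires_grad of the lora_A ([16, 3072]), lora_B ([3072, 16]) and base_layer weights; EQUALS_MATCH pins scaling['default_0'] == 1.0 and _active_adapter[0] == 'default_0'; ID_MATCH pins the small singleton values (the disabled-DoRA and disable_adapters flags and the absent LoRA biases). Changing any of these at runtime, for example activating 'default_1' or merging adapters, would be expected to fail these guards and force a recompile of this [0/2] cache entry.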
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 
in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].use_dora['default_1'], 
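The TENSOR_MATCH guards above also fix the adapter geometry for these projections: a [3072, 3072] bfloat16 base weight with a [3072] bias, rank-16 LoRA factors (lora_A weight [16, 3072], lora_B weight [3072, 16]) whose bias entries are pinned by ID_MATCH (presumably to None), and scaling['default_0'] == 1.0. A small, self-contained shape check with hypothetical random tensors shows how those sizes compose through F.linear:

import torch
import torch.nn.functional as F

# Shapes and dtype taken from the TENSOR_MATCH guards; the values are random placeholders.
x = torch.randn(2, 3072, dtype=torch.bfloat16)           # a [batch, 3072] activation
base_w = torch.randn(3072, 3072, dtype=torch.bfloat16)   # base_layer weight, size=[3072, 3072]
base_b = torch.randn(3072, dtype=torch.bfloat16)         # base_layer bias, size=[3072]
lora_a = torch.randn(16, 3072, dtype=torch.bfloat16)     # lora_A weight, size=[16, 3072]
lora_b = torch.randn(3072, 16, dtype=torch.bfloat16)     # lora_B weight, size=[3072, 16]
scaling = 1.0                                             # scaling['default_0'] == 1.0

result = F.linear(x, base_w, base_b)                               # (2, 3072)
result = result + F.linear(F.linear(x, lora_a), lora_b) * scaling  # rank-16 update, still (2, 3072)
print(result.shape)  # torch.Size([2, 3072])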
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].merged_adapters, 
accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].__dict__) # 
encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # 
diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['attn'].processor, 139846069038960) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['13']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules) == 1 # for 
module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return 
F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['13']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['13']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=14 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[14] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[14] == '14' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, 
device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._active_adapter) == 1 
# if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_hooks # 
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules) == 3 # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] (guard tree continued; the per-record "V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards]" prefix, identical on every record in this stretch, and the "|" depth bars are condensed below)

Guards under L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_q'] (abbreviated TO_Q here), continued:
+- TO_Q.scaling: DICT_LENGTH len == 2; TO_Q.scaling['default_0'] EQUALS_MATCH == 1.0; GuardManager on TO_Q.scaling['default_1'] with no leaf guard  # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward
+- TO_Q._modules: DICT_LENGTH len == 6  # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward
   +- _modules['base_layer']: TYPE_MATCH ___check_type_id(..., 97167728); DICT_CONTAINS not ___dict_contains('forward', __dict__); _parameters DICT_LENGTH == 2  # peft/tuners/lora/layer.py:497 in forward
      +- _parameters['weight']: TENSOR_MATCH check_tensor(Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]); NO_TENSOR_ALIASING  # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward
      +- _parameters['bias']: TENSOR_MATCH check_tensor(Parameter, same DispatchKeySet, torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]); NO_TENSOR_ALIASING  # nn/modules/linear.py:125 in forward
   +- _modules['lora_dropout']: TYPE_MATCH ___check_type_id(..., 96865328); _modules DICT_LENGTH == 2  # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward; return self._modules[key] # nn/modules/container.py:502 in __getitem__
      +- _modules['default_0']: TYPE_MATCH ___check_type_id(..., 97271760); DICT_CONTAINS not ___dict_contains('forward', __dict__)  # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward
      +- _modules['default_1']: GuardManager only, no leaf guard
   +- _modules['lora_A']: TYPE_MATCH ___check_type_id(..., 96865328); DictGuardManager over _modules  # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward
      +- key/value pair index=0: key EQUALS_MATCH list(_modules.keys())[0] == 'default_0'  # return self._modules.keys() # nn/modules/container.py:539 in keys
         value _modules['default_0']: TYPE_MATCH ___check_type_id(..., 97167728); DICT_CONTAINS not ___dict_contains('forward', __dict__); _parameters DICT_LENGTH == 2  # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward
            +- _parameters['weight']: TENSOR_MATCH check_tensor(Parameter, same DispatchKeySet, torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]); NO_TENSOR_ALIASING
            +- _parameters['bias']: ID_MATCH ___check_obj_id(..., 7580768)  # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward
      +- key/value pair index=1: key EQUALS_MATCH list(_modules.keys())[1] == 'default_1', no value guards  # nn/modules/container.py:539 in keys
   +- _modules['lora_B']: TYPE_MATCH ___check_type_id(..., 96865328); _modules DICT_LENGTH == 2  # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward
      +- _modules['default_0']: TYPE_MATCH ___check_type_id(..., 97167728); DICT_CONTAINS not ___dict_contains('forward', __dict__); _parameters DICT_LENGTH == 2
         +- _parameters['weight']: TENSOR_MATCH check_tensor(Parameter, same DispatchKeySet, torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]); NO_TENSOR_ALIASING  # nn/modules/linear.py:125 in forward
         +- _parameters['bias']: ID_MATCH ___check_obj_id(..., 7580768)  # nn/modules/linear.py:125 in forward
      +- _modules['default_1']: GuardManager only, no leaf guard
   +- _modules['lora_embedding_A'], _modules['lora_embedding_B']: GuardManager only, no leaf guards
+- TO_Q.use_dora: DICT_LENGTH len == 2; use_dora['default_0'] ID_MATCH ___check_obj_id(..., 7629920); GuardManager on use_dora['default_1']  # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward
+- TO_Q._parameters: DICT_LENGTH not TO_Q._parameters (empty)  # peft/tuners/lora/layer.py:497 in forward
+- TO_Q._forward_hooks, TO_Q._forward_pre_hooks: DictSubclassGuardManager, no leaf guards
+- TO_Q._active_adapter: TYPE_MATCH ___check_type_id(..., 7593792); LENGTH_CHECK len == 1; _active_adapter[0] EQUALS_MATCH == 'default_0'  # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters; for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward
+- TO_Q._backward_hooks, TO_Q._backward_pre_hooks: DICT_LENGTH not ... (empty)  # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__
+- TO_Q.merged_adapters: TYPE_MATCH ___check_type_id(..., 7593792); LENGTH_CHECK not merged_adapters (empty)  # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged
+- TO_Q._disable_adapters: ID_MATCH ___check_obj_id(..., 7629920)  # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward

The dump then repeats the same subtree, with identical guard kinds, type ids, dtypes, shapes, strides and adapter keys, for the other projections of block 14:
+- ...['attn']._modules['to_k']: TYPE_MATCH ___check_type_id(..., 244529984); DICT_CONTAINS not ___dict_contains('forward', __dict__); followed by the full set of guards listed above for TO_Q  # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__
+- ...['attn']._modules['to_v']: TYPE_MATCH ___check_type_id(..., 244529984); DICT_CONTAINS not ___dict_contains('forward', __dict__); scaling DICT_LENGTH == 2 with scaling['default_0'] EQUALS_MATCH == 1.0; _modules DICT_LENGTH == 6; base_layer TYPE_MATCH 97167728 with _parameters DICT_LENGTH == 2 and weight TENSOR_MATCH (torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]); NO_TENSOR_ALIASING  # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__; the remainder of the to_v subtree continues in the records below
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
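
The add_k_proj and add_v_proj subtrees above follow the same pattern as every LoRA-wrapped Linear in this dump: Dynamo guards each Python attribute the peft forward reads (scaling, use_dora, _active_adapter, merged_adapters, the base_layer and lora_A/lora_B parameter dicts), plus TENSOR_MATCH checks on the weights themselves. The sketch below reconstructs that forward path only from the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509) and the shapes/dtypes in the TENSOR_MATCH entries. It is an illustrative approximation, not the peft implementation: only the active 'default_0' adapter is modeled (the guards also show a registered but inactive 'default_1'), the dropout is assumed to be an identity, and bias=False on lora_A/lora_B is inferred from the ID_MATCH on their bias entries.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Illustrative stand-in for one guarded peft LoRA projection such as
        # add_v_proj above. Sizes/dtypes follow the TENSOR_MATCH guards:
        # frozen bf16 3072x3072 base weight, rank-16 bf16 adapter weights.
        def __init__(self, features: int = 3072, rank: int = 16) -> None:
            super().__init__()
            self.base_layer = nn.Linear(features, features, bias=True, dtype=torch.bfloat16)
            self.base_layer.requires_grad_(False)  # requires_grad=False in the base-weight guard
            # bias=False is an assumption, consistent with the ID_MATCH on the
            # adapters' bias entries (a fixed singleton rather than a tensor).
            self.lora_A = nn.ModuleDict({"default_0": nn.Linear(features, rank, bias=False, dtype=torch.bfloat16)})
            self.lora_B = nn.ModuleDict({"default_0": nn.Linear(rank, features, bias=False, dtype=torch.bfloat16)})
            self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # assumed identity dropout
            self.scaling = {"default_0": 1.0}     # EQUALS_MATCH: scaling['default_0'] == 1.0
            self.use_dora = {"default_0": False}  # use_dora ID_MATCH, layer.py:508
            self.active_adapters = ["default_0"]  # EQUALS_MATCH: _active_adapter[0] == 'default_0'
            self.disable_adapters = False         # guarded at layer.py:488, False in this trace
            self.merged_adapters = []             # LENGTH_CHECK: not merged_adapters

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            result = self.base_layer(x)                           # layer.py:497
            for active_adapter in self.active_adapters:           # layer.py:499
                if active_adapter not in self.lora_A.keys():      # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]              # layer.py:503
                dropout = self.lora_dropout[active_adapter]       # layer.py:504
                scaling = self.scaling[active_adapter]            # layer.py:505
                x = x.to(lora_A.weight.dtype)                     # layer.py:506
                if not self.use_dora[active_adapter]:             # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result

Each plain attribute access in this sketch corresponds to a TYPE_MATCH / DICT_LENGTH / EQUALS_MATCH / ID_MATCH node in the subtree above, and the three TENSOR_MATCH nodes pin the frozen bf16 [3072, 3072] base weight and the trainable [16, 3072] / [3072, 16] adapter weights, which is why a single wrapped projection contributes dozens of guards to this frame.
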
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
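
The guards above and below all protect the same PEFT LoRA forward path that the quoted source comments point at (peft/tuners/lora/layer.py:488-509 calling into nn/modules/linear.py:125). A minimal sketch of that path, written only from those quoted lines and from the shapes pinned by the TENSOR_MATCH guards (rank-16 adapters on 3072-wide projections, bias-free lora_A/lora_B, scaling == 1.0, dropout p == 0.0), is shown below; the class name and constructor arguments are illustrative, not PEFT's actual API, and the DoRA branch is omitted.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Illustrative stand-in for the guarded PEFT LoRA Linear, not the real class.
        def __init__(self, base_layer: nn.Linear, r: int = 16,
                     scaling: float = 1.0, dropout_p: float = 0.0):
            super().__init__()
            self.base_layer = base_layer                                      # frozen 3072x3072 projection in the log
            self.lora_A = nn.Linear(base_layer.in_features, r, bias=False)    # weight [16, 3072] in the guards
            self.lora_B = nn.Linear(r, base_layer.out_features, bias=False)   # weight [3072, 16] in the guards
            self.lora_dropout = nn.Dropout(dropout_p)                         # p == 0.0 in the guards
            self.scaling = scaling                                            # scaling['default_0'] == 1.0
            self.disable_adapters = False
            self.use_dora = False

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            if self.disable_adapters:                  # peft/tuners/lora/layer.py:488
                return self.base_layer(x)
            result = self.base_layer(x)                # layer.py:497 -> F.linear(input, weight, bias)
            x = x.to(self.lora_A.weight.dtype)         # layer.py:506
            if not self.use_dora:                      # layer.py:508
                # layer.py:509: result = result + lora_B(lora_A(dropout(x))) * scaling
                result = result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling
            return result

    layer = LoraLinearSketch(nn.Linear(3072, 3072))
    y = layer(torch.randn(2, 3072))   # same arithmetic the guarded forward performs

Every attribute that forward touches (use_dora, scaling, the adapter dict keys, each parameter's dtype/device/shape/stride) appears above as its own guard, which is why the dump repeats the same pattern for add_q_proj, to_out[0], to_add_out, and every other LoRA-wrapped projection.
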
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 
in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].merged_adapters, 
accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].__dict__) # 
encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # 
diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['attn'].processor, 139846068848576) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
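The guard entries around this point pin the exact geometry of the ff.net[0].proj LoRA layer: a frozen bfloat16 base Linear (weight [12288, 3072] plus bias [12288], requires_grad=False), a rank-16 adapter whose lora_A weight is [16, 3072] and lora_B weight is [12288, 16] (both trainable, guards shown just below), scaling['default_0'] == 1.0, and an ID_MATCH on use_dora['default_0'] consistent with the non-DoRA branch at peft/tuners/lora/layer.py:508. A minimal sketch of the un-merged LoRA path these guards specialize on follows; LoraLinearSketch is a hypothetical stand-in for illustration, not the PEFT lora.Linear class, with every shape, dtype and flag copied from the guard entries.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Hypothetical stand-in for the guarded 'proj' module: a frozen 3072 -> 12288
    # base layer plus a trainable rank-16 adapter, mirroring the TENSOR_MATCH guards.
    def __init__(self, in_features=3072, out_features=12288, r=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)   # weight [12288, 3072], bias [12288]
        self.base_layer.requires_grad_(False)                    # requires_grad=False in the guards
        self.lora_A = nn.Linear(in_features, r, bias=False)      # weight [16, 3072], bias is None
        self.lora_B = nn.Linear(r, out_features, bias=False)     # weight [12288, 16], bias is None
        self.dropout = nn.Identity()                             # lora_dropout acts as a no-op at inference
        self.scaling = scaling                                   # scaling['default_0'] == 1.0 per EQUALS_MATCH

    def forward(self, x):
        result = self.base_layer(x)
        x = x.to(self.lora_A.weight.dtype)
        # non-DoRA branch, i.e. the path taken when use_dora[active_adapter] is False
        return result + self.lora_B(self.lora_A(self.dropout(x))) * self.scaling

if torch.cuda.is_available():  # the guards pin device=0 and torch.bfloat16
    m = LoraLinearSketch().to("cuda", torch.bfloat16)
    y = m(torch.randn(2, 512, 3072, device="cuda", dtype=torch.bfloat16))

Because each of these values is baked into the guard tree, changing any of them (a different rank, dtype, adapter scaling, or enabling DoRA) invalidates this compiled graph and forces another recompile.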
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['14']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules) == 1 # for 
module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return 
F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['14']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['14']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=15 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[15] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[15] == '15' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, 
device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._active_adapter) == 1 
# if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_hooks # 
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules) == 3 # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in 
active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 
in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].merged_adapters, 
accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].__dict__) # 
encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # 
diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['attn'].processor, 139846068850640) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['15']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules) == 1 # for 
module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return 
F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['15']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['15']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=16 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[16] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[16] == '16' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, 
device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._active_adapter) == 1 
# if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_hooks # 
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules) == 3 # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 
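A minimal sketch (not part of the log output) of the LoRA-Linear forward path these guards trace, pieced together from the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509). The class below is an assumption that mirrors the guarded state on the norm1_context 'linear' module: one active adapter 'default_0', no merged adapters, adapters enabled, use_dora False, scaling 1.0.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Simplified stand-in for the guarded peft LoRA Linear; attribute values mirror the guards above.
    def __init__(self, base_layer: nn.Linear, r: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base_layer
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
        self.scaling = {"default_0": scaling}          # guarded: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}           # guarded: ID_MATCH against False
        self.merged_adapters = []                      # guarded: "not ... merged_adapters"
        self.disable_adapters = False                  # guarded: ID_MATCH against False
        self.active_adapters = ["default_0"]           # guarded: EQUALS_MATCH == 'default_0'

    def forward(self, x):
        if self.disable_adapters:                      # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                    # layer.py:497
        for active_adapter in self.active_adapters:    # layer.py:499
            if active_adapter not in self.lora_A.keys():   # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]           # layer.py:503
            dropout = self.lora_dropout[active_adapter]    # layer.py:504
            scaling = self.scaling[active_adapter]         # layer.py:505
            x = x.to(lora_A.weight.dtype)                  # layer.py:506
            if not self.use_dora[active_adapter]:          # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
        return result

For the guarded layer this would be instantiated roughly as LoraLinearSketch(nn.Linear(3072, 18432)), matching the [18432, 16] lora_B weight recorded above.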
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 
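The norm1_context LayerNorm guards around this point pin eps == 1e-6, weight and bias both None (no affine parameters) and a hidden size of 3072, feeding the modulation quoted from diffusers/src/diffusers/models/normalization.py:137-139. A rough sketch (not part of the log) of that path; the six-way chunk and the tensor names are assumptions taken from the quoted source lines, and the 6 * dim projection also matches the guarded 18432 = 6 x 3072 lora_B output size.

import torch
import torch.nn as nn

class AdaNormZeroSketch(nn.Module):
    def __init__(self, dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(dim, 6 * dim)   # wrapped by the LoRA layer in the guarded model
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)

    def forward(self, x, emb):
        emb = self.linear(self.silu(emb))                                   # normalization.py:137
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]    # normalization.py:139
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp

Here x would be the (batch, sequence, 3072) hidden states and emb a (batch, 3072) conditioning embedding.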
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
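The norm_q and norm_k guards pin an RMS-style norm with eps == 1e-6 and a single bfloat16 weight of shape [128] (3072 hidden / 24 heads). A rough sketch (not part of the log) of the normalization quoted in the guard comments (diffusers/src/diffusers/models/normalization.py:428-430); the dtype handling is simplified.

import torch
import torch.nn as nn

class RMSNormSketch(nn.Module):
    def __init__(self, dim: int = 128, eps: float = 1e-6):
        super().__init__()
        self.eps = eps                                 # guarded: eps == 1e-06
        self.weight = nn.Parameter(torch.ones(dim))    # guarded: TENSOR_MATCH size=[128]

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        input_dtype = hidden_states.dtype
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.eps)   # normalization.py:428
        if self.weight is not None:                                        # normalization.py:430
            hidden_states = hidden_states.to(self.weight.dtype) * self.weight
        else:
            hidden_states = hidden_states.to(input_dtype)
        return hidden_states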
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
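The attn guards fix heads == 24, and the attention_processor.py:1721 comment computes head_dim = inner_dim // attn.heads, i.e. 3072 // 24 = 128, which is exactly the [128] norm_q / norm_k weight size above. A toy illustration (not part of the log) of that reshape around scaled-dot-product attention; the sequence length and the surrounding processor wiring are assumptions, not taken from the log.

import torch
import torch.nn.functional as F

batch, seq, inner_dim, heads = 1, 64, 3072, 24     # seq is arbitrary here
head_dim = inner_dim // heads                      # 3072 // 24 == 128

query = torch.randn(batch, seq, inner_dim)
key = torch.randn(batch, seq, inner_dim)
value = torch.randn(batch, seq, inner_dim)

# (batch, seq, inner_dim) -> (batch, heads, seq, head_dim)
query = query.view(batch, -1, heads, head_dim).transpose(1, 2)
key = key.view(batch, -1, heads, head_dim).transpose(1, 2)
value = value.view(batch, -1, heads, head_dim).transpose(1, 2)

out = F.scaled_dot_product_attention(query, key, value)
out = out.transpose(1, 2).reshape(batch, -1, heads * head_dim)
assert out.shape == (batch, seq, inner_dim)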
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 
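Each TENSOR_MATCH entry above (check_tensor(...)) ties the compiled graph to a parameter's class, dispatch keys, dtype, device, requires_grad, shape and stride; if any of these change, the guard fails and the frame recompiles. A small illustration (not part of the log) of the properties pinned for the to_q lora_A 'default_0' weight ([16, 3072], bfloat16, cuda:0, requires_grad=True); the helper is illustrative, not Dynamo's implementation.

import torch

def matches_lora_A_guard(t: torch.Tensor) -> bool:
    # The same properties the TENSOR_MATCH record spells out, checked by hand.
    return (
        isinstance(t, torch.nn.Parameter)
        and t.dtype == torch.bfloat16
        and t.device == torch.device("cuda", 0)
        and t.requires_grad
        and tuple(t.shape) == (16, 3072)       # LoRA rank 16 against hidden size 3072
        and t.stride() == (3072, 1)
    )

if torch.cuda.is_available():
    lora_A_weight = torch.nn.Parameter(
        torch.empty(16, 3072, dtype=torch.bfloat16, device="cuda:0")
    )
    assert matches_lora_A_guard(lora_A_weight)
    # Moving, casting or resizing the parameter would flip this to False,
    # which is what forces torch.compile to recompile the frame.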
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
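Guard dumps like these records appear to come from Dynamo's "guards" logging artifact. A sketch (not part of the log) of one way to enable it for a compiled function; the toy function is a stand-in, only the set_logs call and the TORCH_LOGS variable are the actual knobs.

import torch
import torch._logging

# Equivalent to running with TORCH_LOGS="guards,recompiles" in the environment.
torch._logging.set_logs(guards=True, recompiles=True)

@torch.compile
def toy(x):
    return torch.nn.functional.silu(x) * 2

toy(torch.randn(4, 3072))   # the guard tree for this frame is then logged, like the records in this file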
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in 
active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 
in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].merged_adapters, 
accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].__dict__) # 
encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # 
diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['attn'].processor, 139846068639824) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
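
The # comments on these guards repeatedly quote the LoRA forward path in peft/tuners/lora/layer.py (lines 488-509): the frozen base Linear, the per-adapter lora_A / lora_B / lora_dropout lookups, the scaling factor, and the use_dora branch. Below is a rough reconstruction of that control flow; adapter names, dtypes, and shapes (base 3072 -> 12288, rank 16) are taken from the EQUALS_MATCH / TENSOR_MATCH guards on transformer_blocks.16.ff.net.0.proj, while the class itself is an illustrative sketch rather than the actual peft implementation.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        def __init__(self, in_features=3072, out_features=12288, r=16, adapter="default_0"):
            super().__init__()
            # Frozen bf16 base layer: weight [12288, 3072], bias [12288] (requires_grad=False in the guards).
            self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
            self.base_layer.requires_grad_(False)
            # Per-adapter LoRA pieces; the guards show two registered adapters
            # ('default_0', 'default_1') with only 'default_0' active.
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # lora_dropout entry per adapter
            self.scaling = {adapter: 1.0}        # EQUALS_MATCH: scaling['default_0'] == 1.0
            self.use_dora = {adapter: False}     # ID_MATCH on use_dora['default_0']
            self.active_adapters = [adapter]     # _active_adapter[0] == 'default_0'
            self.disable_adapters = False        # ID_MATCH on _disable_adapters
            self.merged_adapters = []            # LENGTH_CHECK: not merged_adapters

        def forward(self, x):
            if self.disable_adapters:                         # layer.py:488
                return self.base_layer(x)
            result = self.base_layer(x)                       # layer.py:497
            for active_adapter in self.active_adapters:       # layer.py:499
                if active_adapter not in self.lora_A.keys():  # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]          # layer.py:503
                dropout = self.lora_dropout[active_adapter]   # layer.py:504
                scaling = self.scaling[active_adapter]        # layer.py:505
                x = x.to(lora_A.weight.dtype)                 # layer.py:506
                if not self.use_dora[active_adapter]:         # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result

Every attribute this forward touches gets its own guard, which is why the tree is so deep: an exact-value check on scaling['default_0'], identity checks on use_dora and _disable_adapters, a length/equality check on _active_adapter, and TENSOR_MATCH checks (dtype, device, shape, stride, requires_grad) on the base and LoRA weights. Changing the LoRA scale, switching the active adapter, or merging the adapters would fail these guards and trigger another recompile.
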
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
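
The guard subtree above walks the PEFT-wrapped Linear at transformer_blocks.16.ff.net.2: a frozen bfloat16 base_layer (TENSOR_MATCH on weight [3072, 12288] and bias [3072]), rank-16 lora_A/lora_B adapters under the 'default_0' key, lora_dropout p == 0.0, scaling == 1.0, use_dora False, and no merged adapters. As a rough, self-contained sketch reconstructed only from the peft/tuners/lora/layer.py lines quoted in the guard comments (layer.py:497-509) and from the guarded shapes, not the verbatim peft code, the computation being guarded is:

    import torch
    import torch.nn as nn

    # Illustrative sketch of the LoRA-on-Linear forward path the guards above protect,
    # following the source lines quoted in the guard comments. Shapes/dtypes mirror the
    # TENSOR_MATCH entries; the function and variable names here are illustrative.
    def lora_linear(x, base, lora_A, lora_B, dropout, scaling):
        result = base(x)               # result = self.base_layer(x, *args, **kwargs)  (layer.py:497)
        x = x.to(lora_A.weight.dtype)  # x = x.to(lora_A.weight.dtype)                 (layer.py:506)
        # use_dora['default_0'] is guarded False, so the plain low-rank update applies:
        result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
        return result

    base   = nn.Linear(12288, 3072, dtype=torch.bfloat16)             # frozen base, weight [3072, 12288]
    lora_A = nn.Linear(12288, 16, bias=False, dtype=torch.bfloat16)   # rank-16 A, weight [16, 12288]
    lora_B = nn.Linear(16, 3072, bias=False, dtype=torch.bfloat16)    # rank-16 B, weight [3072, 16]
    y = lora_linear(torch.randn(2, 12288, dtype=torch.bfloat16),
                    base, lora_A, lora_B, nn.Dropout(p=0.0), scaling=1.0)

Values such as p == 0.0, scaling == 1.0 and the 'default_0' adapter key were read as plain Python objects during tracing and baked into the compiled graph as constants, which is why they surface here as EQUALS_MATCH / ID_MATCH / DICT_LENGTH guards rather than as runtime tensor inputs.
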
L['self']._modules['transformer_blocks']._modules['16']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules) == 1 # for 
module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return 
F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['16']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['16']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=17 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[17] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[17] == '17' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, 
device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._active_adapter) == 1 
# if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_hooks # 
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules) == 3 # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in 
active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
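[Editor's note] The guard entries around this point all specialize on the PEFT LoRA Linear forward path whose source lines are quoted in the trailing comments (peft/tuners/lora/layer.py:488-509, nn/modules/linear.py:125): type/ID checks on base_layer, lora_A/lora_B ModuleDicts keyed 'default_0'/'default_1', the scaling dict, use_dora flags, the active adapter list, and TENSOR_MATCH guards on the bf16 weights (base [3072, 3072], lora_A [16, 3072], lora_B [3072, 16]). Below is a minimal, self-contained sketch of that computation, reconstructed only from the statements quoted in these guards; the module names, shapes, and the stand-in Dropout(p=0.0) are illustrative assumptions, not the verbatim PEFT implementation.

import torch
import torch.nn as nn

dim, rank = 3072, 16                                               # mirrors TENSOR_MATCH sizes above (assumed for illustration)
base_layer = nn.Linear(dim, dim, bias=True, dtype=torch.bfloat16)  # frozen base weight/bias, requires_grad=False in the log
lora_A = nn.Linear(dim, rank, bias=False, dtype=torch.bfloat16)    # lora_A['default_0'], weight [16, 3072], bias is None (ID_MATCH)
lora_B = nn.Linear(rank, dim, bias=False, dtype=torch.bfloat16)    # lora_B['default_0'], weight [3072, 16], bias is None (ID_MATCH)
dropout = nn.Dropout(p=0.0)                                        # stand-in for lora_dropout['default_0']; actual module type not shown in the log
scaling = 1.0                                                      # scaling['default_0'] == 1.0 per EQUALS_MATCH

x = torch.randn(2, dim, dtype=torch.bfloat16)
result = base_layer(x)                                             # result = self.base_layer(x, *args, **kwargs)  (layer.py:497)
x = x.to(lora_A.weight.dtype)                                      # x = x.to(lora_A.weight.dtype)                 (layer.py:506)
result = result + lora_B(lora_A(dropout(x))) * scaling             # result = result + lora_B(lora_A(dropout(x))) * scaling  (layer.py:509, use_dora False)

Because each of these attributes is guarded, changing any of them on the live model (for example adding/removing an adapter, toggling use_dora, changing a LoRA scale, or swapping weight dtype/shape) invalidates the guards and triggers a Dynamo recompilation of this frame.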
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 
in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].merged_adapters, 
accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].__dict__) # 
encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # 
diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['attn'].processor, 139846067916960) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
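
The guard records above walk the LoRA-wrapped `proj` Linear inside `ff.net.0` of transformer block 17: Dynamo pins the adapter bookkeeping (`_active_adapter[0] == 'default_0'`, `use_dora`, empty `merged_adapters`, `scaling['default_0'] == 1.0`) as well as the tensors it will trace through (`base_layer.weight` of size [12288, 3072] and the rank-16 `lora_A`/`lora_B` weights). The repeated `ID_MATCH` guards on the `bias` entries are consistent with those biases being `None` (the adapter Linears are created without bias). As a reading aid, the following is a condensed sketch of the forward path being guarded, reconstructed only from the `peft/tuners/lora/layer.py` lines quoted in the guard comments; it is illustrative, not the verbatim peft implementation, and the shapes, dtype, and adapter name `default_0` are copied from the log above.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Illustrative reconstruction of the guarded forward path
    # (the peft/tuners/lora/layer.py lines quoted in the guard comments).
    def __init__(self, in_features=3072, out_features=12288, r=16,
                 adapter="default_0", dtype=torch.bfloat16):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, dtype=dtype)
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False, dtype=dtype)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False, dtype=dtype)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}        # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}     # ID_MATCH on the bool in use_dora['default_0']
        self.merged_adapters = []            # LENGTH_CHECK: not merged_adapters
        self.disable_adapters = False        # ID_MATCH guard at layer.py:488
        self.active_adapters = [adapter]     # EQUALS_MATCH: _active_adapter[0] == 'default_0'

    def forward(self, x):
        # if self.disable_adapters: ...   (merge/unmerge branch omitted in this sketch)
        result = self.base_layer(x)                          # layer.py:497
        for active_adapter in self.active_adapters:          # layer.py:499
            if active_adapter not in self.lora_A:            # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]             # layer.py:503
            dropout = self.lora_dropout[active_adapter]      # layer.py:504
            scaling = self.scaling[active_adapter]           # layer.py:505
            x = x.to(lora_A.weight.dtype)                    # layer.py:506
            if not self.use_dora[active_adapter]:            # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Because every TENSOR_MATCH above also fixes dtype, device, size, and stride, loading an adapter with a different rank or dtype (or merging it, which would change merged_adapters) would be expected to fail these guards and trigger another recompile.
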
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['17']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules) == 1 # for 
module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 
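[editor's note] The guard entries above all specialize the PEFT LoRA-wrapped `proj` Linear: one active adapter 'default_0', scaling 1.0, use_dora False, no merged adapters, adapters enabled. Below is a condensed, hedged sketch of the forward path those guards correspond to, reconstructed only from the source comments quoted in the dump (peft/tuners/lora/layer.py:488-509); names follow those comments, and this is an illustration, not the verbatim peft implementation.

def lora_linear_forward(self, x, *args, **kwargs):
    # layer.py:488 -- guarded via ID_MATCH on _disable_adapters (evidently False here)
    if self.disable_adapters:
        if self.merged:
            self.unmerge()
        return self.base_layer(x, *args, **kwargs)
    result = self.base_layer(x, *args, **kwargs)               # layer.py:497
    for active_adapter in self.active_adapters:                # layer.py:499, ['default_0'] here
        if active_adapter not in self.lora_A.keys():           # layer.py:500
            continue
        lora_A = self.lora_A[active_adapter]
        lora_B = self.lora_B[active_adapter]                   # layer.py:503
        dropout = self.lora_dropout[active_adapter]            # layer.py:504
        scaling = self.scaling[active_adapter]                 # layer.py:505, == 1.0 here
        x = x.to(lora_A.weight.dtype)                          # layer.py:506, bfloat16 here
        if not self.use_dora[active_adapter]:                  # layer.py:508, branch taken here
            result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
    return result

Every attribute read on that path (the scaling dict, adapter keys, dropout module, weight dtypes and shapes) shows up above as a corresponding TYPE_MATCH / DICT_LENGTH / EQUALS_MATCH / TENSOR_MATCH guard, which is why the tree nests this deep per LoRA-wrapped Linear.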
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return 
F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
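[editor's note] The guards above walk the ff_context feed-forward: a three-module `net` whose key order ('0', '1', '2') is itself guarded, consisting of a tanh-approximate GELU projection, a Dropout with p == 0.0, and an output Linear. A minimal sketch of that structure, assuming standard torch modules and omitting the LoRA wrappers that the surrounding guards also cover; class and attribute names are illustrative, not the exact diffusers definitions.

import torch
import torch.nn.functional as F
from torch import nn

class GELUProj(nn.Module):
    # counterpart of net['0']: a Linear followed by F.gelu(..., approximate='tanh')
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "tanh"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)      # guarded base weight: [12288, 3072]
        self.approximate = approximate              # EQUALS_MATCH above: 'tanh'

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.proj(hidden_states)    # activations.py:88 in the dump
        return F.gelu(hidden_states, approximate=self.approximate)  # activations.py:83

class FeedForwardSketch(nn.Module):
    def __init__(self, dim: int = 3072, inner_dim: int = 12288):
        super().__init__()
        self.net = nn.ModuleList([
            GELUProj(dim, inner_dim),               # net['0']
            nn.Dropout(p=0.0),                      # net['1']: p == 0.0, inplace False
            nn.Linear(inner_dim, dim),              # net['2'] base layer: [3072, 12288]
        ])

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        for module in self.net:                     # attention.py:1165-1166 in the dump
            hidden_states = module(hidden_states)
        return hidden_states

Because the forward loop iterates self.net in key order, Dynamo also guards list(net._modules.keys()) element by element, which is where the EQUALS_MATCH '0' / '1' / '2' entries above come from.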
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['17']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['17']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=18 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules.keys())[18] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules.keys())[18] == '18' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18'], 247975168) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18'].__dict__) # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules) == 7 # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'], accessed_by=DictGetItemGuardAccessor(norm1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'], 99411648) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].__dict__) # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, 
device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._active_adapter) == 1 
# if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_hooks # 
norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1']._backward_pre_hooks # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'], accessed_by=DictGetItemGuardAccessor(norm1_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'], 99411648) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].__dict__) # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].emb, accessed_by=DictGetItemGuardAccessor(emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context'].emb, 7580768) # if self.emb is not None: # diffusers/src/diffusers/models/normalization.py:135 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules) == 3 # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[18432], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[18432, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:139 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:137 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['norm1_context']._backward_pre_hooks # norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( # diffusers/src/diffusers/models/transformers/transformer_flux.py:167 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn'], 239601328) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].__dict__) # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules) == 12 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # 
nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = 
self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in 
active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'], accessed_by=DictGetItemGuardAccessor(add_k_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'], 244529984) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].__dict__) # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._backward_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_k_proj']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1736 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'], accessed_by=DictGetItemGuardAccessor(add_v_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'], 244529984) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].__dict__) # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._backward_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_v_proj']._backward_pre_hooks # encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1737 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'], accessed_by=DictGetItemGuardAccessor(add_q_proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'], 244529984) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].__dict__) # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, 
BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].use_dora) == 2 # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # 
peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._backward_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['add_q_proj']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1735 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out'], accessed_by=DictGetItemGuardAccessor(to_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out'], 96863792) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules) == 2 # return len(self._modules) # nn/modules/container.py:352 in __len__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'], accessed_by=DictGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'], 244529984) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].__dict__) # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters) == 2 # return 
F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
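The guard entries above and below keep citing the same handful of source lines in peft's LoRA linear layer (peft/tuners/lora/layer.py:488-509) and its adapter bookkeeping (peft/tuners/tuners_utils.py:455 and 469). The following is a minimal sketch of that control flow, paraphrased only from the statements quoted in the guard comments, to show why the tree checks so many attributes per projection; the ToyLoraLinear class, its sizes, and the single 'default_0' adapter are simplifications for illustration, not the actual peft implementation.

# Hedged sketch: condensed paraphrase of the control flow the guard
# annotations cite. Simplified names; not the real peft code.
import torch
import torch.nn as nn

class ToyLoraLinear(nn.Module):
    def __init__(self, in_features=3072, out_features=3072, r=16, adapter="default_0"):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)              # frozen base weight/bias
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}        # guarded via EQUALS_MATCH (== 1.0)
        self.use_dora = {adapter: False}     # guarded via ID_MATCH on the bool
        self.merged_adapters = []            # guarded via LENGTH_CHECK (empty list)
        self._disable_adapters = False       # guarded via ID_MATCH on the bool
        self._active_adapter = [adapter]     # guarded via EQUALS_MATCH on 'default_0'

    @property
    def active_adapters(self):
        # mirrors "if isinstance(self.active_adapter, str)" (tuners_utils.py:469)
        return self._active_adapter

    def forward(self, x, *args, **kwargs):
        if self._disable_adapters:                        # layer.py:488
            return self.base_layer(x, *args, **kwargs)
        result = self.base_layer(x, *args, **kwargs)      # layer.py:497
        for active_adapter in self.active_adapters:       # layer.py:499
            if active_adapter not in self.lora_A.keys():  # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]          # layer.py:503
            dropout = self.lora_dropout[active_adapter]   # layer.py:504
            scaling = self.scaling[active_adapter]        # layer.py:505
            x = x.to(lora_A.weight.dtype)                 # layer.py:506
            if not self.use_dora[active_adapter]:         # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

# Every attribute touched above (dict lengths, adapter keys, scaling values,
# use_dora flags, parameter shapes/dtypes) becomes a separate guard, which is
# why the same block of checks repeats for each lora_A/lora_B projection.
layer = ToyLoraLinear()
out = layer(torch.randn(2, 3072))

Dumps like this are typically produced by enabling the dynamo guard artifact logger, e.g. TORCH_LOGS="+dynamo,guards" or torch._logging.set_logs(guards=True); the exact knobs vary across PyTorch releases.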
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules, 
accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._backward_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 
in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['0']._backward_pre_hooks # hidden_states = attn.to_out[0](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1776 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'], accessed_by=DictGetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'], 97677440) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].__dict__) # hidden_states = attn.to_out[1](hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1778 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_out']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'], accessed_by=DictGetItemGuardAccessor(to_add_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'], 244529984) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].__dict__) # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), 
torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._backward_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].merged_adapters, 
accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['to_add_out']._backward_pre_hooks # encoder_hidden_states = attn.to_add_out(encoder_hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1779 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'], accessed_by=DictGetItemGuardAccessor(norm_added_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'], 99397456) # if attn.norm_added_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1749 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].__dict__) # 
encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_q']._backward_pre_hooks # encoder_hidden_states_query_proj = attn.norm_added_q(encoder_hidden_states_query_proj) # diffusers/src/diffusers/models/attention_processor.py:1750 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'], accessed_by=DictGetItemGuardAccessor(norm_added_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'], 99397456) # if attn.norm_added_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1751 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].__dict__) # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._modules['norm_added_k']._backward_pre_hooks # encoder_hidden_states_key_proj = attn.norm_added_k(encoder_hidden_states_key_proj) # diffusers/src/diffusers/models/attention_processor.py:1752 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # 
diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['attn'].processor, 139846067919072) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks # attn_output, context_attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:172 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'], accessed_by=DictGetItemGuardAccessor(norm2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'], 98072640) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].__dict__) # norm_hidden_states = self.norm2(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:182 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm2'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff'], accessed_by=DictGetItemGuardAccessor(ff) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff'], 239927888) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff'].__dict__) # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules) == 1 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) 
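For orientation: the guard entries above repeatedly cite peft/tuners/lora/layer.py:488-509 and nn/modules/linear.py:125, i.e. the LoRA-wrapped Linear forward that Dynamo is specializing on (base_layer, lora_A, lora_B, lora_dropout, scaling, use_dora, active adapters). Below is a minimal, self-contained sketch of that forward path, reconstructed only from the source fragments quoted in the guard comments; the attribute names mirror those fragments, but this is an illustration under that assumption, not PEFT's exact implementation (for example, the per-adapter dropout is modeled here as nn.Identity).

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        """Illustrative stand-in for a PEFT LoRA-wrapped nn.Linear (names follow the guard comments)."""

        def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
            super().__init__()
            self.base_layer = base_layer  # frozen projection; guards check its weight/bias via TENSOR_MATCH
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # assumed Identity (dropout disabled)
            self.scaling = {adapter: 1.0}          # guards: scaling['default_0'] == 1.0
            self.use_dora = {adapter: False}       # guards: use_dora['default_0'] is False
            self.active_adapters = [adapter]       # guards: _active_adapter == ['default_0']
            self.merged_adapters = []              # guards: merged_adapters is empty
            self.disable_adapters = False          # guards: _disable_adapters is False

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            if self.disable_adapters or self.merged_adapters:    # layer.py:488 / tuners_utils.py:455
                return self.base_layer(x)
            result = self.base_layer(x)                          # layer.py:497
            for active_adapter in self.active_adapters:          # layer.py:499
                if active_adapter not in self.lora_A.keys():     # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]             # layer.py:503
                dropout = self.lora_dropout[active_adapter]      # layer.py:504
                scaling = self.scaling[active_adapter]           # layer.py:505
                x_cast = x.to(lora_A.weight.dtype)               # layer.py:506
                if not self.use_dora[active_adapter]:            # layer.py:508
                    result = result + lora_B(lora_A(dropout(x_cast))) * scaling  # layer.py:509
            return result

    # Shapes chosen to match the guards above: a 3072 -> 3072 projection with rank-16 adapters
    # (lora_A.weight is [16, 3072], lora_B.weight is [3072, 16]; the log records them as bfloat16 CUDA parameters).
    layer = LoraLinearSketch(nn.Linear(3072, 3072))
    out = layer(torch.randn(2, 3072))

The rank-16 shapes in the example correspond to the TENSOR_MATCH guards above around the 3072 -> 3072 base Linear of to_add_out and ff.net.0.proj.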
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].p == 0.0 # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | 
+- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._disable_adapters, 
accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['transformer_blocks']._modules['18']._modules['ff']._backward_pre_hooks # ff_output = self.ff(norm_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:185 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'], accessed_by=DictGetItemGuardAccessor(norm2_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'], 98072640) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].__dict__) # norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:195 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['norm2_context'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'], accessed_by=DictGetItemGuardAccessor(ff_context) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'], 239927888) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context'].__dict__) # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules) == 1 # for 
module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net'], accessed_by=DictGetItemGuardAccessor(net) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net'], 96863792) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']) == 3 # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'], 236152192) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].__dict__) # hidden_states = module(hidden_states) # 
diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules) == 1 # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], accessed_by=DictGetItemGuardAccessor(proj) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'], 244529984) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].__dict__) # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # 
peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # 
peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | 
| | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._modules['proj']._backward_pre_hooks # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._parameters # hidden_states = self.proj(hidden_states) # diffusers/src/diffusers/models/activations.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0'].approximate == 'tanh' # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['0']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'], 97677440) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].p, accessed_by=DictGetItemGuardAccessor(p) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].p == 0.0 # return 
F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].inplace, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].training, accessed_by=DictGetItemGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['1'].training, 7629920) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'], 244529984) # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].__dict__) # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 12288], stride=[12288, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 12288], stride=[12288, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | 
| | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._backward_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._modules['net']._modules['2']._backward_pre_hooks # hidden_states = module(hidden_states) # diffusers/src/diffusers/models/attention.py:1166 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._parameters # for module in self.net: # diffusers/src/diffusers/models/attention.py:1165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._modules['ff_context']._backward_pre_hooks # context_ff_output = self.ff_context(norm_encoder_hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:198 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._parameters # norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:165 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._backward_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['transformer_blocks']._modules['18']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['transformer_blocks']._modules['18']._backward_pre_hooks # encoder_hidden_states, hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:494 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks'], accessed_by=DictGetItemGuardAccessor(single_transformer_blocks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks'], 96863792) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[0] == '0' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'], 96881248) # emb = 
self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], 
accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].scaling, 
accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'], 
96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].processor, 139846067704112) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['0']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['0']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['0']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[1] == '1' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].scaling, 
accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
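The DictGuardManager over lora_A._modules pins the ModuleDict's exact key order: keys()[0] == 'default_0' and keys()[1] == 'default_1', i.e. two adapters are installed on this layer while only 'default_0' is active. A hedged paraphrase of what that check amounts to, with a plain dict standing in for the nn.ModuleDict:

# Hedged paraphrase of the key-order guard; a plain dict stands in for the nn.ModuleDict.
lora_A_modules = {"default_0": "<lora_A Linear>", "default_1": "<lora_A Linear>"}

assert list(lora_A_modules.keys())[0] == "default_0"  # EQUALS_MATCH on keys()[0]
assert list(lora_A_modules.keys())[1] == "default_1"  # EQUALS_MATCH on keys()[1]

Loading an additional adapter or deleting one changes this key list, so these guards fail and Dynamo recompiles the frame.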
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
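The proj_mlp guards above all specialize PEFT's LoRA Linear.forward along one branch (the source comments quote peft/tuners/lora/layer.py:488-509): adapters enabled, nothing merged, a single active adapter 'default_0' with scaling 1.0, rank-16 A/B matrices in bf16, and use_dora false. A minimal sketch of that path, reconstructed from the quoted lines rather than from the PEFT source itself, so names and structure are approximate:

# Hedged sketch of the LoRA Linear forward path these guards trace; an
# illustration of the guarded branch, not PEFT's actual implementation.
import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    def __init__(self, base: nn.Linear, r: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base
        self.lora_A = nn.Linear(base.in_features, r, bias=False, dtype=base.weight.dtype)   # TENSOR_MATCH: [16, in_features], bf16
        self.lora_B = nn.Linear(r, base.out_features, bias=False, dtype=base.weight.dtype)  # TENSOR_MATCH: [out_features, 16], bias is None
        self.lora_dropout = nn.Identity()   # stand-in; the guard only pins the dropout module's type
        self.scaling = scaling              # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.disable_adapters = False       # ID_MATCH on _disable_adapters
        self.merged = False                 # LENGTH_CHECK: merged_adapters is empty
        self.use_dora = False               # ID_MATCH on use_dora['default_0']

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.disable_adapters or self.merged:
            return self.base_layer(x)                      # branches the guards rule out
        result = self.base_layer(x)                        # result = self.base_layer(x, *args, **kwargs)
        x = x.to(self.lora_A.weight.dtype)                 # x = x.to(lora_A.weight.dtype)
        if not self.use_dora:                              # if not self.use_dora[active_adapter]:
            result = result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling
        return result

Because every attribute on this path is guarded (adapter names, scaling, dtypes, shapes, the use_dora and merged flags), changing any of them at runtime, for example switching the active adapter or merging weights, invalidates the guards and forces a recompile.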
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
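Taken together, the guards on norm, proj_mlp, act_mlp and proj_out walk single_transformer_blocks['1'] in the order its forward uses them; the quoted diffusers lines (transformer_flux.py:88-98 and normalization.py:169-171) correspond roughly to the flow below. This is a hedged sketch with attention elided; the dimensions come from the TENSOR_MATCH guards (inner dim 3072, MLP dim 12288, proj_out input 15360 = 3072 + 12288) and the adaLN chunk order is assumed:

# Hedged sketch of the single-transformer-block data flow implied by the quoted
# diffusers lines; attention is stubbed out, dims come from the TENSOR_MATCH guards.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SingleBlockSketch(nn.Module):
    def __init__(self, dim: int = 3072, mlp_ratio: int = 4):
        super().__init__()
        mlp_dim = dim * mlp_ratio                                          # 12288 in the guards
        self.norm_linear = nn.Linear(dim, 3 * dim)                         # emb = self.linear(self.silu(emb))
        self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)  # eps == 1e-06; weight/bias guarded via ID_MATCH (no affine params)
        self.proj_mlp = nn.Linear(dim, mlp_dim)
        self.act_mlp = nn.GELU(approximate="tanh")                         # EQUALS_MATCH: approximate == 'tanh'
        self.proj_out = nn.Linear(dim + mlp_dim, dim)                      # weight guarded as [3072, 15360]

    def forward(self, hidden_states, temb):
        shift, scale, gate = self.norm_linear(F.silu(temb)).chunk(3, dim=-1)
        norm_hidden_states = self.norm(hidden_states) * (1 + scale[:, None]) + shift[:, None]
        mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))   # transformer_flux.py:89
        attn_output = norm_hidden_states                                      # attention elided in this sketch
        hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=-1)
        return gate[:, None] * self.proj_out(hidden_states)                   # transformer_flux.py:98

In the real model, norm_linear and proj_mlp/proj_out are the LoRA-wrapped layers whose guards appear above and below; the sketch only shows the base computation they wrap.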
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
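Each TENSOR_MATCH above pins a parameter's class, dispatch key set, dtype (torch.bfloat16), device (cuda:0), requires_grad, size and stride, and NO_TENSOR_ALIASING additionally asserts the guarded tensors are distinct objects. For reference, a dump in this form is produced by the guards/recompiles logging artifacts (the [__guards] tag in every record); a hedged example of turning them on, assuming a recent PyTorch 2.x where these set_logs keywords are available:

# Hedged example: enable the guard and recompile log artifacts that emit dumps
# like the one above (roughly equivalent to running with TORCH_LOGS="guards,recompiles").
import torch
import torch._logging

torch._logging.set_logs(guards=True, recompiles=True)

@torch.compile
def f(x):
    return x * 2

f(torch.randn(8, device="cuda" if torch.cuda.is_available() else "cpu"))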
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'], 
96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].processor, 139846067704880) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['1']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['1']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['1']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=2 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[2] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[2] == '2' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].scaling, 
accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'], 
96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) 
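Note: the records above are the guards Dynamo installs around the to_k LoRA wrapper of single_transformer_blocks.2.attn: TYPE_MATCH/DICT_LENGTH on the lora_A/lora_B/lora_dropout containers, TENSOR_MATCH on the rank-16 bfloat16 adapter weights (lora_A [16, 3072], lora_B [3072, 16]), LENGTH_CHECK that merged_adapters is empty, and EQUALS_MATCH that _active_adapter[0] == 'default_0' even though a second adapter 'default_1' is installed. Below is a minimal sketch that produces guards of this general shape (assumptions: a PEFT-style get_peft_model/add_adapter API, a plain nn.Linear standing in for attn.to_k, and adapter names chosen to mirror the diffusers-assigned default_0/default_1; it will not reproduce this exact tree, which comes from a bf16 Flux transformer on CUDA):

import torch
import torch.nn as nn
from peft import LoraConfig, get_peft_model

class TinyAttn(nn.Module):
    def __init__(self, dim=64):            # the real projection is 3072-wide
        super().__init__()
        self.to_k = nn.Linear(dim, dim)

    def forward(self, x):
        return self.to_k(x)

# lora_alpha == r gives scaling == 1.0, as guarded elsewhere in this tree.
cfg = LoraConfig(r=16, lora_alpha=16, lora_dropout=0.0, target_modules=["to_k"])
model = get_peft_model(TinyAttn(), cfg, adapter_name="default_0")
model.add_adapter("default_1", LoraConfig(r=16, lora_alpha=16, target_modules=["to_k"]))
model.set_adapter("default_0")             # second adapter present but inactive,
                                           # so _active_adapter stays ['default_0']

torch._logging.set_logs(guards=True)       # roughly what TORCH_LOGS="guards" enables

compiled = torch.compile(model)
compiled(torch.randn(1, 64))               # prints a guard tree of the same shape
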
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].processor, 139846067705648) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['2']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['2']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['2']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=3 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[3] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[3] == '3' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].scaling, 
accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'], 
96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) 
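The guard entries above for to_q and to_k all cite the same handful of source statements (peft/tuners/lora/layer.py:488-509 and nn/modules/linear.py:125). Below is a minimal sketch, reconstructed only from those quoted statements and the guarded values in this log (rank 16, scaling 1.0, single active adapter 'default_0', adapters not merged, DoRA disabled); the class name LoraLinearSketch and the nn.Identity dropout are illustrative assumptions, not the verbatim peft implementation.

    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        """Sketch of the LoRA Linear forward path these guards specialize on."""

        def __init__(self, base_layer: nn.Linear, r: int = 16, scaling: float = 1.0):
            super().__init__()
            self.base_layer = base_layer  # frozen bf16 [3072, 3072] weight, pinned by TENSOR_MATCH above
            self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # exact dropout type not recoverable from the log
            self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})   # weight [16, 3072]
            self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})  # weight [3072, 16]
            self.scaling = {"default_0": scaling}        # guarded: scaling['default_0'] == 1.0
            self.use_dora = {"default_0": False}         # guarded by ID_MATCH on False
            self.merged_adapters = []                    # guarded empty -> not merged
            self.disable_adapters = False                # guarded by ID_MATCH on False
            self.active_adapters = ["default_0"]         # guarded: _active_adapter[0] == 'default_0'

        def forward(self, x, *args, **kwargs):
            if self.disable_adapters:                        # layer.py:488
                return self.base_layer(x, *args, **kwargs)
            result = self.base_layer(x, *args, **kwargs)     # layer.py:497 -> F.linear(input, self.weight, self.bias)
            for active_adapter in self.active_adapters:      # layer.py:499
                if active_adapter not in self.lora_A.keys(): # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]         # layer.py:503
                dropout = self.lora_dropout[active_adapter]  # layer.py:504
                scaling = self.scaling[active_adapter]       # layer.py:505
                x = x.to(lora_A.weight.dtype)                # layer.py:506
                if not self.use_dora[active_adapter]:        # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result

Each TENSOR_MATCH above pins the dtype, device, shape, and stride of these parameters, so changing any of them (for example loading an adapter with a different rank, or merging the adapter so merged_adapters becomes non-empty) fails the guard set and forces Dynamo to recompile this frame.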
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].processor, 139846067706416) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['3']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['3']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['3']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=4 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[4] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[4] == '4' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].scaling, 
accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
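
This proj_out subtree follows the PEFT LoRA linear forward it cites (peft/tuners/lora/layer.py:497-509), pinning lora_A.weight to [16, 15360] and lora_B.weight to [3072, 16] around a frozen [3072, 15360] base weight (15360 = 3072 + 4 * 3072, consistent with proj_out consuming the concatenated attention and MLP streams of the single transformer block). A hedged sketch of that computation, not PEFT's actual class; lora_dropout is assumed to be an Identity, as it would be for dropout p = 0:

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Shapes follow the proj_out guards: base [3072, 15360], lora_A [16, 15360], lora_B [3072, 16].
        def __init__(self, in_features=15360, out_features=3072, r=16, scaling=1.0):
            super().__init__()
            self.base_layer = nn.Linear(in_features, out_features)
            for p in self.base_layer.parameters():
                p.requires_grad_(False)                  # base weight/bias guarded with requires_grad=False
            self.lora_A = nn.Linear(in_features, r, bias=False)   # bias guarded as None (ID_MATCH)
            self.lora_B = nn.Linear(r, out_features, bias=False)
            self.lora_dropout = nn.Identity()            # assumption: dropout p = 0
            self.scaling = scaling

        def forward(self, x):
            result = self.base_layer(x)                                          # layer.py:497
            x = x.to(self.lora_A.weight.dtype)                                   # layer.py:506
            return result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling  # layer.py:509

    proj_out = LoraLinearSketch()
    y = proj_out(torch.randn(1, 2, 15360))   # -> [1, 2, 3072], matching the guarded shapes
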
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
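
Besides tensors, the guards pin this layer's PEFT adapter bookkeeping: _active_adapter == ['default_0'], merged_adapters empty (so merged is False), and ID_MATCH checks on _disable_adapters and use_dora['default_0'] that, given the non-DoRA branch being traced at layer.py:508-509, both point at the False singleton. Since Dynamo specializes on this state, changing it after compilation (for example via set_adapter, merge_adapter or disabling adapters) would be expected to invalidate these guards and recompile. A small sketch of the checked state, with names taken from the log:

    # State mirrored from the guard entries above; 'default_1' appears in the tree but its
    # settings are not value-checked here, so they are left out.
    class AdapterStateSketch:
        def __init__(self):
            self._active_adapter = ["default_0"]   # TYPE_MATCH list, LENGTH_CHECK == 1, EQUALS_MATCH
            self.merged_adapters = []              # LENGTH_CHECK: empty
            self._disable_adapters = False         # ID_MATCH against a singleton (inferred: False)
            self.use_dora = {"default_0": False}   # plain LoRA path at layer.py:508

        @property
        def merged(self):
            return bool(self.merged_adapters)      # peft/tuners/tuners_utils.py:455

        @property
        def active_adapters(self):
            if isinstance(self._active_adapter, str):   # tuners_utils.py:469, the guarded checks
                return [self._active_adapter]
            return self._active_adapter
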
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
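
The attn subtree fixes heads == 24 and gives norm_q and norm_k a weight of size [128] with eps == 1e-06, which matches head_dim = inner_dim // attn.heads = 3072 // 24 = 128 from the cited line. The normalization stepped through at normalization.py:428-430 is an RMS norm over the last dimension; a minimal sketch, not diffusers' exact implementation:

    import torch
    import torch.nn as nn

    class RMSNormSketch(nn.Module):
        def __init__(self, dim=128, eps=1e-6):             # head_dim and eps taken from the guards
            super().__init__()
            self.eps = eps
            self.weight = nn.Parameter(torch.ones(dim))    # TENSOR_MATCH size=[128]

        def forward(self, hidden_states):
            variance = hidden_states.float().pow(2).mean(-1, keepdim=True)
            hidden_states = hidden_states * torch.rsqrt(variance + self.eps)   # normalization.py:428
            if self.weight is not None:                                        # normalization.py:430
                hidden_states = hidden_states.to(self.weight.dtype) * self.weight
            return hidden_states

    norm_q = RMSNormSketch()
    q = norm_q(torch.randn(1, 24, 2, 128))   # per-head query normalization, head_dim == 128
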
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'], 
96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].processor, 139846067707184) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['4']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
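Note: most checks in this dump reduce to a handful of primitives. Roughly (a simplification; the real predicates are generated by torch._dynamo.guards, not written like this): TYPE_MATCH compares id(type(obj)), ID_MATCH compares id(obj) (which is why small constants such as 7580768 and 7629920 recur; they are most likely the ids of the None and False singletons in this process), EQUALS_MATCH is plain ==, and DICT_LENGTH / LENGTH_CHECK test emptiness or an exact length. The ID_MATCH on attn.processor above therefore pins the exact processor instance that was traced, not merely its class.

    # Rough Python equivalents of the guard primitives in this dump (assumption:
    # simplified models of what torch._dynamo.guards emits, not its real code).
    def type_match(obj, type_id):          # TYPE_MATCH / ___check_type_id
        return id(type(obj)) == type_id    # exact class; a subclass fails the guard

    def id_match(obj, obj_id):             # ID_MATCH / ___check_obj_id
        return id(obj) == obj_id           # the very same object (None, False, a processor, ...)

    def equals_match(obj, value):          # EQUALS_MATCH
        return obj == value

    def dict_length(d, n=None):            # DICT_LENGTH: "not d" when empty, else len(d) == n
        return (not d) if n is None else len(d) == n

    print(id(None), id(False))             # compare with the small constants above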
[0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['4']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['4']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=5 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[5] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[5] == '5' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
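Note: two recurring guard shapes are worth decoding. "DICT_CONTAINS: not ___dict_contains('forward', ...__dict__)" asserts that no per-instance forward has been monkey-patched onto the module (which would bypass the class forward that was traced), and the KeyValueManager entries over list(..._modules.keys())[i] pin the iteration order of the ModuleList, since the trace unrolled "for index_block, block in enumerate(self.single_transformer_blocks)". A small illustration with a stand-in ModuleList:

    import torch.nn as nn

    blocks = nn.ModuleList([nn.SiLU() for _ in range(8)])
    print(list(blocks._modules.keys())[5])      # '5' -- the EQUALS_MATCH on keys()[5] above

    m = blocks[5]
    print('forward' in m.__dict__)              # False: the class-level forward is in effect

    m.forward = lambda x: x                     # an instance-level override ...
    print('forward' in m.__dict__)              # ... True: this guard would now fail and force a recompile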
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
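Note: check_tensor(...) (TENSOR_MATCH) freezes the static metadata of every parameter the graph touched: its Python class (Parameter), dispatch keys and device, dtype, requires_grad, size and stride; NO_TENSOR_ALIASING additionally asserts that the guarded tensors are all distinct objects. Changing any of these (loading an adapter in a different dtype, swapping a weight for a view of another tensor, and so on) invalidates this cache entry. A rough approximation of the static part of that check, using the lora_A shape guarded above:

    import torch

    def tensor_match_sketch(t, *, dtype, size, stride, requires_grad):
        # Approximation of the static portion of check_tensor(...); the real guard
        # also checks the Parameter subclass, the DispatchKeySet and the device index.
        return (t.dtype == dtype and tuple(t.size()) == size
                and t.stride() == stride and t.requires_grad == requires_grad)

    w = torch.empty(16, 3072, dtype=torch.bfloat16, requires_grad=True)   # like lora_A.default_0.weight
    print(tensor_match_sketch(w, dtype=torch.bfloat16, size=(16, 3072),
                              stride=(3072, 1), requires_grad=True))      # True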
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
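Note: the ID_MATCH guards on the adapters' _parameters['bias'] use the same small constant seen elsewhere for what is very likely None, consistent with LoRA projections created with bias=False; only the frozen base_layer carries a bias here. A one-liner showing the shape/bias combination the guards describe (names assumed):

    import torch.nn as nn

    lora_B = nn.Linear(16, 9216, bias=False)   # adapter up-projection, matching the guarded [9216, 16] weight
    print(lora_B.bias is None)                 # True -> F.linear(input, weight, None) in the traced forward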
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
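Note: the block's norm submodule guarded here combines a SiLU + Linear modulation (normalization.py:169) with an affine-free LayerNorm over 3072 features at eps=1e-06 (its weight and bias are guarded as what is almost certainly None). A compact reconstruction of that path, with assumed class/variable names and an assumed 3-way chunk of the modulation output (shift/scale/gate), matching the guarded 3072 -> 9216 linear:

    import torch
    import torch.nn as nn

    class AdaNormSingleSketch(nn.Module):
        def __init__(self, dim: int = 3072):
            super().__init__()
            self.silu = nn.SiLU()
            self.linear = nn.Linear(dim, 3 * dim)      # guarded base_layer weight [9216, 3072]
            self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # weight/bias are None

        def forward(self, x, emb):
            emb = self.linear(self.silu(emb))                                   # normalization.py:169
            shift_msa, scale_msa, gate = emb.chunk(3, dim=1)
            x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]    # normalization.py:171
            return x, gate

    norm = AdaNormSingleSketch()
    x, gate = norm(torch.randn(2, 64, 3072), torch.randn(2, 3072))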
L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].scaling, 
accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__, 
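
The adapter-state guards recorded above for proj_out (TYPE_MATCH, LENGTH_CHECK, EQUALS_MATCH, DICT_LENGTH, ID_MATCH) amount to plain attribute checks on the live module tree; TENSOR_MATCH entries further down additionally pin dtype, device, shape, stride and requires_grad of individual parameters. The sketch below is an illustrative approximation in Python, not the guard code Dynamo actually generates; the helper name proj_out_guards_hold and the reading of the ID_MATCH as a check against False are assumptions taken from the guard entries themselves.

    # Illustrative approximation of the proj_out adapter-state guards above.
    # NOT Dynamo's generated code; it only mirrors what the entries assert.
    def proj_out_guards_hold(transformer) -> bool:
        proj_out = transformer.single_transformer_blocks[5].proj_out  # PEFT lora.Linear wrapper
        return (
            type(proj_out._active_adapter) is list            # TYPE_MATCH on _active_adapter
            and len(proj_out._active_adapter) == 1            # LENGTH_CHECK
            and proj_out._active_adapter[0] == "default_0"    # EQUALS_MATCH
            and not proj_out._backward_hooks                  # DICT_LENGTH: hook dicts empty
            and not proj_out._backward_pre_hooks
            and type(proj_out.merged_adapters) is list        # TYPE_MATCH on merged_adapters
            and not proj_out.merged_adapters                  # LENGTH_CHECK: nothing merged
            and proj_out._disable_adapters is False           # ID_MATCH; assumed to target id(False)
        )

If any of these attributes changes between calls (for example an adapter is merged or disabled), the corresponding guard fails and the cached compiled graph is not reused.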
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
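
The norm_q / norm_k guards pin eps == 1e-06 and a single bfloat16 weight of shape [128], and cite diffusers/src/diffusers/models/normalization.py:428-430. Below is a minimal sketch of that RMS-norm computation reconstructed from the cited lines; the class name MiniRMSNorm and the variance step (mean of squares over the last axis) are assumptions, not the diffusers implementation.

    import torch
    from torch import nn

    class MiniRMSNorm(nn.Module):
        # Minimal sketch of the RMS norm referenced by the norm_q/norm_k guards.
        # eps and the optional elementwise weight match the guarded values.
        def __init__(self, dim: int, eps: float = 1e-6, elementwise_affine: bool = True):
            super().__init__()
            self.eps = eps
            self.weight = nn.Parameter(torch.ones(dim)) if elementwise_affine else None

        def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
            # mean of squares over the last (head_dim) axis; this step is assumed
            variance = hidden_states.pow(2).mean(-1, keepdim=True)
            hidden_states = hidden_states * torch.rsqrt(variance + self.eps)  # normalization.py:428
            if self.weight is not None:                                       # normalization.py:430
                hidden_states = hidden_states * self.weight
            return hidden_states

With the guarded attn.heads == 24 and 3072-wide projections, head_dim = 3072 // 24 = 128, which matches the guarded weight shape [128].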
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
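
The lora_A / lora_B / lora_dropout guards walk ModuleDict-style containers keyed by adapter name ('default_0', 'default_1'), which is why the guard tree checks both the container lengths and the key order (container.py:502 __getitem__, container.py:539 keys). A hedged sketch of that per-adapter layout is below; the dims and rank mirror the guarded to_q shapes (in=3072, rank=16), and the use of nn.Identity for a zero-probability dropout slot is an assumption.

    from torch import nn

    # Sketch of the per-adapter containers the guards above iterate over.
    in_features, out_features, rank = 3072, 3072, 16
    lora_A = nn.ModuleDict({
        "default_0": nn.Linear(in_features, rank, bias=False),
        "default_1": nn.Linear(in_features, rank, bias=False),
    })
    lora_B = nn.ModuleDict({
        "default_0": nn.Linear(rank, out_features, bias=False),
        "default_1": nn.Linear(rank, out_features, bias=False),
    })
    lora_dropout = nn.ModuleDict({
        "default_0": nn.Identity(),  # assumed: dropout p=0 stored as Identity
        "default_1": nn.Identity(),
    })
    # The guards assert exactly this structure, e.g.
    # list(lora_A._modules.keys())[0] == 'default_0' and len(lora_dropout._modules) == 2.
    assert list(lora_A.keys()) == ["default_0", "default_1"]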
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
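
Nearly every guard in this subtree cites the same forward path, peft/tuners/lora/layer.py:488-509. The control flow those citations describe is reconstructed below as a simplified sketch, not the actual PEFT source; the function signature is an assumption, and merged-weight and DoRA handling beyond the cited branches are omitted.

    import torch

    # Simplified reconstruction of the LoRA Linear forward path cited by the guards.
    def lora_linear_forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        if self.disable_adapters:                        # layer.py:488 (guarded via _disable_adapters)
            if self.merged:                              # tuners_utils.py:455: bool(self.merged_adapters)
                self.unmerge()
            return self.base_layer(x, *args, **kwargs)

        result = self.base_layer(x, *args, **kwargs)     # layer.py:497
        for active_adapter in self.active_adapters:      # layer.py:499 (here: ['default_0'])
            if active_adapter not in self.lora_A.keys(): # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]         # layer.py:503
            dropout = self.lora_dropout[active_adapter]  # layer.py:504
            scaling = self.scaling[active_adapter]       # layer.py:505 (guarded value: 1.0)
            x = x.to(lora_A.weight.dtype)                # layer.py:506
            if not self.use_dora[active_adapter]:        # layer.py:508 (assumed False here)
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

The guarded lora_A weight [16, 3072] and lora_B weight [3072, 16] make this a rank-16 update on a 3072-wide projection, applied with scaling 1.0 for the 'default_0' adapter.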
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'], 
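
The attn guards cite diffusers/src/diffusers/models/attention_processor.py:1716-1730: project with to_q / to_k, derive head_dim from attn.heads (guarded as 24), reshape per head, then apply the optional norm_q / norm_k. A sketch reconstructed from the cited lines follows; the to_v and encoder branches are omitted, and the exact reshape layout is an assumption.

    import torch

    # Sketch of the q/k projection path cited by the attn guards.
    def project_qk(attn, hidden_states: torch.Tensor):
        batch_size = hidden_states.shape[0]
        query = attn.to_q(hidden_states)          # attention_processor.py:1716 (LoRA-wrapped Linear)
        key = attn.to_k(hidden_states)            # attention_processor.py:1717

        inner_dim = key.shape[-1]                 # 3072 for the guarded weights
        head_dim = inner_dim // attn.heads        # attention_processor.py:1721; 3072 // 24 == 128

        # split into heads; layout assumed
        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        if attn.norm_q is not None:               # attention_processor.py:1727
            query = attn.norm_q(query)            # attention_processor.py:1728 (RMS norm over head_dim)
        if attn.norm_k is not None:               # attention_processor.py:1729
            key = attn.norm_k(key)                # attention_processor.py:1730
        return query, key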
96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) 
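
[Editor's note] The guard entries above and below for to_k / to_v all trace the same code path: the PEFT LoRA linear forward, as the embedded source comments (peft/tuners/lora/layer.py:488-509) show. As a reading aid, here is a condensed, illustrative sketch of that control flow. The class name LoraLinearSketch, the single 'default_0' adapter, and r=16 are assumptions read off the guard output; this is not the real PEFT implementation.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Illustrative stand-in for the PEFT LoRA Linear layer whose forward
        # (peft/tuners/lora/layer.py:488-509) the guards above keep citing.
        # The 'default_0' adapter name and r=16 are assumptions taken from the
        # guard output, not the actual PEFT class.
        def __init__(self, base_layer: nn.Linear, r: int = 16, scale: float = 1.0):
            super().__init__()
            self.base_layer = base_layer
            self.lora_A = nn.ModuleDict(
                {"default_0": nn.Linear(base_layer.in_features, r, bias=False)}
            )
            self.lora_B = nn.ModuleDict(
                {"default_0": nn.Linear(r, base_layer.out_features, bias=False)}
            )
            self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
            self.scaling = {"default_0": scale}      # guarded by EQUALS_MATCH ... == 1.0
            self.use_dora = {"default_0": False}     # guarded by ID_MATCH on use_dora['default_0']
            self.disable_adapters = False            # guarded by ID_MATCH on _disable_adapters
            self.active_adapters = ["default_0"]     # guarded by EQUALS_MATCH ... == 'default_0'

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            if self.disable_adapters:
                return self.base_layer(x)
            result = self.base_layer(x)
            for active_adapter in self.active_adapters:
                if active_adapter not in self.lora_A.keys():
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]
                dropout = self.lora_dropout[active_adapter]
                scaling = self.scaling[active_adapter]
                x_cast = x.to(lora_A.weight.dtype)
                if not self.use_dora[active_adapter]:
                    result = result + lora_B(lora_A(dropout(x_cast))) * scaling
            return result

Every attribute read on that path (base_layer, lora_A, lora_B, lora_dropout, scaling, use_dora, active_adapter, disable_adapters, merged_adapters) appears as its own GuardManager node in the tree, which is why a single LoRA-wrapped to_k or to_v projection contributes dozens of guards. The TENSOR_MATCH entries pin the rank-16 adapter shapes ([16, 3072] and [3072, 16], bfloat16), which is why the sketch uses r=16.
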
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].processor, 139846067507312) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['5']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['5']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['5']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=6 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[6] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[6] == '6' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].scaling, 
accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
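For orientation, the guard comments in this stretch quote peft/tuners/lora/layer.py:488-509 almost line by line (disable_adapters, base_layer, active_adapters, lora_A / lora_B / dropout / scaling, the use_dora branch). Below is a minimal, self-contained sketch of that control flow, assembled only from the lines quoted in the guard comments; it is a paraphrase for reading the guards, not the verbatim peft implementation (for instance, active_adapters and disable_adapters are properties in peft but plain attributes here, and the merge/unmerge path behind the disable_adapters check is omitted).

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
            super().__init__()
            self.base_layer = base_layer
            self.disable_adapters = False                 # cf. the _disable_adapters ID_MATCH guard
            self.merged_adapters = []                     # cf. "not ... merged_adapters"
            self.active_adapters = [adapter]              # cf. _active_adapter[0] == 'default_0'
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
            self.scaling = {adapter: 1.0}                 # cf. scaling['default_0'] == 1.0
            self.use_dora = {adapter: False}              # the log guards two entries; one suffices here

        def forward(self, x, *args, **kwargs):
            # layer.py:488 "if self.disable_adapters:" -- merged/unmerged handling omitted in this sketch
            result = self.base_layer(x, *args, **kwargs)              # layer.py:497
            for active_adapter in self.active_adapters:               # layer.py:499
                if active_adapter not in self.lora_A.keys():          # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]                  # layer.py:503
                dropout = self.lora_dropout[active_adapter]           # layer.py:504
                scaling = self.scaling[active_adapter]                # layer.py:505
                x = x.to(lora_A.weight.dtype)                         # layer.py:506
                if not self.use_dora[active_adapter]:                 # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
            return result

    # e.g. the proj_mlp wrapper guarded above: base 3072 -> 12288 with a rank-16 adapter
    layer = LoraLinearSketch(nn.Linear(3072, 12288), r=16)
    print(layer(torch.randn(2, 3072)).shape)   # torch.Size([2, 12288])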
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
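The tensor guards in this stretch fix the geometry of single_transformer_blocks['6']: proj_mlp carries a rank-16 LoRA pair of shape [16, 3072] / [12288, 16], proj_out's base weight is [3072, 15360] with a [16, 15360] / [3072, 16] pair on top, and everything is torch.bfloat16 on device 0. A small shape-only sketch of that data path follows; the sequence length, the attention stand-in, and the concat that produces the 15360-wide proj_out input are assumptions inferred from the shapes rather than quoted in this log.

    import torch
    import torch.nn as nn

    dim, mlp_dim, rank = 3072, 12288, 16          # sizes taken from the TENSOR_MATCH guards above

    proj_mlp = nn.Linear(dim, mlp_dim)            # wrapped by LoRA in the log; base shape implied by [16, 3072] / [12288, 16]
    act_mlp  = nn.GELU(approximate="tanh")        # guard: act_mlp.approximate == 'tanh'
    proj_out = nn.Linear(dim + mlp_dim, dim)      # guarded base weight [3072, 15360]
    lora_A   = nn.Linear(dim + mlp_dim, rank, bias=False)   # guarded [16, 15360]
    lora_B   = nn.Linear(rank, dim, bias=False)              # guarded [3072, 16]

    x = torch.randn(1, 64, dim)                   # hypothetical sequence length, not from the log
    mlp_branch  = act_mlp(proj_mlp(x))            # transformer_flux.py:89 (quoted in the guard comments)
    attn_branch = torch.randn_like(x)             # stand-in for the attn output guarded further below
    h = torch.cat([attn_branch, mlp_branch], dim=-1)   # assumed concat giving the 15360-wide input
    gate = torch.ones(())                          # placeholder for the gate in transformer_flux.py:98
    out = gate * (proj_out(h) + lora_B(lora_A(h)) * 1.0)   # scaling['default_0'] == 1.0 per the guard
    print(out.shape)                               # torch.Size([1, 64, 3072])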
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
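attn.norm_q and attn.norm_k are guarded here as 128-element, eps=1e-06 RMSNorm weights in bfloat16, which is consistent with the heads == 24 guard and the 3072-wide attention projections below (head_dim = 3072 // 24 = 128, the division quoted from attention_processor.py:1721). A minimal paraphrase of the normalization the guard comments quote from diffusers/src/diffusers/models/normalization.py:428-430 follows; the dtype handling and the query layout in the usage line are assumptions, not taken from this log.

    import torch

    def rms_norm(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
        # normalization.py:428  hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
        # normalization.py:430  if self.weight is not None: ...
        variance = hidden_states.float().pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + eps)
        return hidden_states.to(weight.dtype) * weight

    head_dim = 3072 // 24                                          # heads == 24 guard -> guarded weight size [128]
    q = torch.randn(2, 24, 16, head_dim, dtype=torch.bfloat16)     # hypothetical [batch, heads, tokens, head_dim]
    w = torch.ones(head_dim, dtype=torch.bfloat16)                 # guarded shape [128], bfloat16
    print(rms_norm(q, w).shape)                                    # torch.Size([2, 24, 16, 128])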
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'], 
96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) 
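The guard subtrees above for attn.to_q, and the matching ones that follow for attn.to_k and attn.to_v, all specialize on the same PEFT LoRA forward path: every trailing comment points at peft/tuners/lora/layer.py:488-509 or nn/modules/linear.py:125. As a rough sketch of the code shape these guards pin down -- reconstructed only from the source lines quoted in the guard comments, with the function signature and surrounding class assumed rather than taken from this log -- the traced hot path looks like:

    # Sketch of the LoRA Linear forward that the guards above specialize on (assumed layout).
    def lora_linear_forward(self, x, *args, **kwargs):
        # ID_MATCH on _disable_adapters and TYPE_MATCH/LENGTH_CHECK on merged_adapters
        # pin this to the "adapters enabled, nothing merged" branch.
        if self.disable_adapters:
            ...  # merged/unmerged handling, not exercised in this trace
        # Frozen bf16 base projection: TENSOR_MATCH pins weight [3072, 3072] and bias [3072],
        # torch.bfloat16, cuda:0, requires_grad=False.
        result = self.base_layer(x, *args, **kwargs)
        # _active_adapter is guarded to be exactly ['default_0'], even though two adapters
        # ('default_0' and 'default_1') are loaded in scaling / lora_A / lora_B / use_dora.
        for active_adapter in self.active_adapters:
            if active_adapter not in self.lora_A.keys():
                continue
            lora_A = self.lora_A[active_adapter]    # weight [16, 3072], requires_grad=True; bias ID_MATCHed (LoRA A/B are typically bias-free)
            lora_B = self.lora_B[active_adapter]    # weight [3072, 16], requires_grad=True
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]  # EQUALS_MATCH: scaling['default_0'] == 1.0
            x = x.to(lora_A.weight.dtype)
            # ID_MATCH on use_dora['default_0']; the line-509 add path is what got traced,
            # so DoRA is off for this adapter.
            if not self.use_dora[active_adapter]:
                result = result + lora_B(lora_A(dropout(x))) * scaling
        return result

Because the TENSOR_MATCH entries also fix the dtype (torch.bfloat16), device (cuda:0), size and stride of each Parameter, and the DICT_CONTAINS guards check that no instance-level 'forward' override has been installed, loading an adapter with a different rank or dtype, merging/unmerging the LoRA weights, or monkey-patching a module's forward would fail these guards and force a recompile of this frame.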
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].processor, 139846067508080) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['6']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['6']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['6']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=7 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[7] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[7] == '7' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].scaling, 
accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
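[editorial note] Taken together, the proj_out guards above pin down a PEFT LoRA-wrapped Linear: a frozen bfloat16 base layer with weight [3072, 15360], two registered adapters ('default_0' and 'default_1') of which only 'default_0' is active, rank-16 lora_A/lora_B weights of shape [16, 15360] and [3072, 16], scaling 1.0, use_dora disabled, and no merged adapters. The sketch below is a simplified, self-contained stand-in (float32, single adapter, no-op dropout) that follows the forward path cited in the guard comments (peft/tuners/lora/layer.py:497-509); it is not the PEFT implementation itself.

```python
import torch
import torch.nn as nn

# Hedged stand-in for the guarded single_transformer_blocks.7.proj_out module:
#   result = base_layer(x) + lora_B(lora_A(dropout(x))) * scaling   (layer.py:509)
# Shapes follow the TENSOR_MATCH guards; dtype/bias details are simplified
# (the real base layer is frozen bfloat16; lora_A/lora_B have no bias, consistent
# with the ID_MATCH guards on their `bias` entries).
class LoraLinearSketch(nn.Module):
    def __init__(self, in_features=15360, out_features=3072, r=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)  # weight [3072, 15360], bias [3072]
        self.base_layer.requires_grad_(False)                    # guards: requires_grad=False
        self.lora_dropout = nn.Identity()                        # assumed no-op dropout
        self.lora_A = nn.Linear(in_features, r, bias=False)      # weight [16, 15360], requires_grad=True
        self.lora_B = nn.Linear(r, out_features, bias=False)     # weight [3072, 16], requires_grad=True
        self.scaling = scaling                                    # guard: scaling['default_0'] == 1.0

    def forward(self, x):
        result = self.base_layer(x)                               # layer.py:497
        x = x.to(self.lora_A.weight.dtype)                        # layer.py:506
        return result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling  # layer.py:509

proj_out = LoraLinearSketch()
print(proj_out(torch.randn(2, 15360)).shape)                      # torch.Size([2, 3072])
```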
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
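[editorial note] The norm_q / norm_k guards fix eps at 1e-06 and a single weight of shape [128], which is consistent with the earlier heads == 24 guard and the cited `head_dim = inner_dim // attn.heads` (3072 // 24 = 128). A simplified RMSNorm sketch following the formula quoted in the guard comments; the exact dtype casting in diffusers may differ from this version:

```python
import torch

# Hedged sketch of the per-head RMSNorm that the norm_q / norm_k guards describe:
# eps == 1e-06 and one learnable weight of shape [head_dim] == [128].
def rms_norm_sketch(hidden_states: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)   # normalization.py:428
    return hidden_states * weight                                 # applied when self.weight is not None

query = torch.randn(2, 24, 512, 128)   # [batch, heads=24, seq_len, head_dim=128] (illustrative shape)
weight = torch.ones(128)
print(rms_norm_sketch(query, weight).shape)   # torch.Size([2, 24, 512, 128])
```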
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'], 
96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].processor, 139846067508848) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['7']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['7']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['7']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=8 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[8] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[8] == '8' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].scaling, 
accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
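The guard comments above quote the LoRA forward path in peft/tuners/lora/layer.py (lines 488-509) and the adapter bookkeeping in peft/tuners/tuners_utils.py (lines 455 and 469); the guard tree simply pins every attribute that path reads. Below is a minimal sketch of that control flow, reconstructed only from the quoted source lines, with a hypothetical LoraLinearSketch module standing in for peft's lora.Linear (only one adapter key is populated here, while the guarded model carries 'default_0' and 'default_1').

    # Minimal sketch reconstructed from the source lines quoted in the guards
    # (peft/tuners/lora/layer.py:488-509, tuners_utils.py:455/469). Approximate,
    # hypothetical class, not peft's actual implementation.
    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        def __init__(self, in_features=3072, out_features=3072, r=16):
            super().__init__()
            self.base_layer = nn.Linear(in_features, out_features)                              # cf. size=[3072, 3072]
            self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False)})   # cf. size=[16, 3072]
            self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False)})  # cf. size=[3072, 16]
            self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
            self.scaling = {"default_0": 1.0}          # EQUALS_MATCH: scaling['default_0'] == 1.0
            self.use_dora = {"default_0": False}       # ID_MATCH on use_dora['default_0']
            self.merged_adapters = []                  # LENGTH_CHECK: not merged_adapters
            self._active_adapter = ["default_0"]       # EQUALS_MATCH: _active_adapter[0] == 'default_0'
            self._disable_adapters = False             # ID_MATCH on _disable_adapters

        @property
        def active_adapters(self):
            return self._active_adapter                # cf. tuners_utils.py:469

        def forward(self, x):
            if self._disable_adapters:                 # layer.py:488
                return self.base_layer(x)              # approximate: adapter path skipped
            result = self.base_layer(x)                # layer.py:497
            for active_adapter in self.active_adapters:        # layer.py:499
                if active_adapter not in self.lora_A.keys():   # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]           # layer.py:503
                dropout = self.lora_dropout[active_adapter]    # layer.py:504
                scaling = self.scaling[active_adapter]         # layer.py:505
                x = x.to(lora_A.weight.dtype)                  # layer.py:506
                if not self.use_dora[active_adapter]:          # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
            return result

    out = LoraLinearSketch()(torch.randn(2, 3072))     # shapes follow the guarded sizes above

Every branch in this sketch corresponds to a guard family in the tree: ID_MATCH for the booleans, EQUALS_MATCH for the active adapter name and the scaling value, and TENSOR_MATCH for the LoRA and base weights.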
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
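The norm_q/norm_k guards above quote diffusers/src/diffusers/models/normalization.py:428 and :430, with eps pinned to 1e-06 and a bfloat16 weight of size [128]; that [128] is consistent with head_dim = inner_dim // attn.heads = 3072 // 24 quoted a few records earlier. Below is a small, approximate reconstruction of that RMS-norm forward; the variance line itself is not quoted in the log and is assumed to be the usual mean of squares over the last dimension.

    # Approximate reconstruction of the RMS-norm forward quoted above
    # (diffusers normalization.py:428/430). Only :428 and :430 appear in the
    # log; the variance computation is an assumption.
    import torch
    import torch.nn as nn

    class RMSNormSketch(nn.Module):
        def __init__(self, dim=128, eps=1e-6):                 # eps == 1e-06 guarded above
            super().__init__()
            self.eps = eps
            self.weight = nn.Parameter(torch.ones(dim, dtype=torch.bfloat16),
                                       requires_grad=False)    # TENSOR_MATCH: size=[128], bf16, no grad

        def forward(self, hidden_states):
            variance = hidden_states.float().pow(2).mean(-1, keepdim=True)       # assumed
            hidden_states = hidden_states * torch.rsqrt(variance + self.eps)     # normalization.py:428
            if self.weight is not None:                                          # normalization.py:430
                hidden_states = hidden_states.to(self.weight.dtype) * self.weight
            return hidden_states

    q = torch.randn(1, 24, 1024, 128, dtype=torch.bfloat16)    # [batch, heads=24, seq, head_dim=128]
    q = RMSNormSketch()(q)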
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
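The DictGuardManager above pins the exact ModuleDict key layout ('default_0' plus 'default_1') and, elsewhere in the tree, the single active adapter 'default_0'; loading, deleting, or switching LoRA adapters therefore fails these guards and forces a recompile, which may be why this tree is tagged [0/2] (a later compile of frame 0, not the first). A hedged sketch of how to surface the same guard and recompile output without scraping a raw log, using the documented TORCH_LOGS switch; the tiny compiled function below is only a placeholder, not the Flux transformer.

    # Hedged sketch: reproducing guard/recompile logs with TORCH_LOGS instead of
    # grepping a raw dump. Usually set in the shell before launching the script.
    import os
    os.environ["TORCH_LOGS"] = "guards,recompiles"

    import torch

    @torch.compile
    def f(x):
        return torch.nn.functional.linear(x, torch.eye(4))

    f(torch.randn(2, 4))   # first call compiles and prints a guard tree like the one above
    f(torch.randn(3, 4))   # a later call may recompile if a guard (e.g. a shape) fails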
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
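Each TENSOR_MATCH above pins a parameter's class, dispatch keys, dtype (torch.bfloat16), device index, requires_grad, size, and stride, for example the [3072, 3072] base weight and the [16, 3072]/[3072, 16] LoRA pair; changing any of these between calls fails the guard. Below is a hedged, self-contained illustration of that effect, with a plain nn.Linear standing in for one of the guarded layers.

    # Hedged illustration of a TENSOR_MATCH-style guard failure: the compiled
    # code is specialized to the parameter's dtype/device/shape, so changing the
    # dtype between calls triggers a recompile. Sizes follow the [3072, 16]
    # lora_B weight above.
    import torch

    lin = torch.nn.Linear(16, 3072, bias=False)

    @torch.compile
    def step(x):
        return lin(x)

    step(torch.randn(2, 16))                            # compile #1: guards pin the float32 weight
    lin.to(torch.bfloat16)                              # dtype change -> the weight guard no longer holds
    step(torch.randn(2, 16, dtype=torch.bfloat16))      # compile #2; visible with TORCH_LOGS="recompiles"

Running this with TORCH_LOGS="recompiles" (as in the previous sketch) should also print which guard failed, which for a dtype change is the check on the weight tensor.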
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'], 
96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) 
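For reference, the guard entries in this block keep pointing at the same few source lines in peft/tuners/lora/layer.py (roughly 488-509 in this build): the frozen base projection, the per-adapter lookups, and the update result = result + lora_B(lora_A(dropout(x))) * scaling. The sketch below reconstructs that forward path only from those quoted comments and from the TENSOR_MATCH entries above (3072-wide bfloat16 base weights with requires_grad=False, rank-16 adapter weights with requires_grad=True and no bias, adapter key 'default_0', scaling 1.0). It is an illustrative, self-contained approximation, not the PEFT source itself; in particular the nn.Identity dropout and the single-adapter ModuleDicts are simplifying assumptions (the log shows two adapter keys, 'default_0' and 'default_1', with only 'default_0' active).

import torch
import torch.nn as nn


class LoraLinearSketch(nn.Module):
    def __init__(self, features=3072, rank=16, adapter="default_0"):
        super().__init__()
        # Frozen base projection: bfloat16 [3072, 3072] weight plus [3072] bias,
        # requires_grad=False, matching the base_layer TENSOR_MATCH guards.
        self.base_layer = nn.Linear(features, features, dtype=torch.bfloat16)
        self.base_layer.weight.requires_grad_(False)
        self.base_layer.bias.requires_grad_(False)
        # Trainable rank-16 adapter pair, bias-free, keyed like the guarded
        # ModuleDicts ('default_1' omitted here for brevity).
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(features, rank, bias=False, dtype=torch.bfloat16)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(rank, features, bias=False, dtype=torch.bfloat16)})
        # Assumption: identity dropout stands in for whatever lora_dropout holds.
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}      # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}   # ID_MATCH on use_dora['default_0']
        self.active_adapters = [adapter]   # _active_adapter == ['default_0']

    def forward(self, x):
        # result = self.base_layer(x, *args, **kwargs)           (layer.py:497)
        result = self.base_layer(x)
        for active_adapter in self.active_adapters:              # layer.py:499
            if active_adapter not in self.lora_A.keys():         # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                  # layer.py:503
            dropout = self.lora_dropout[active_adapter]           # layer.py:504
            scaling = self.scaling[active_adapter]                # layer.py:505
            x = x.to(lora_A.weight.dtype)                         # layer.py:506
            if not self.use_dora[active_adapter]:                 # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result


if __name__ == "__main__":
    layer = LoraLinearSketch()
    hidden_states = torch.randn(1, 4096, 3072, dtype=torch.bfloat16)
    print(layer(hidden_states).shape)  # torch.Size([1, 4096, 3072])

Every attribute this forward touches (lora_A.keys(), scaling, use_dora, _active_adapter, merged_adapters, the hook dicts, and the parameter tensors themselves) appears as a separate guard in the tree, which is why each LoRA-wrapped to_k/to_v projection contributes dozens of guard entries on its own.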
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].processor, 139846067509616) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['8']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['8']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['8']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=9 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[9] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[9] == '9' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].scaling, 
accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__, 
accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # 
diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'], 
accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in 
forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not 
self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # 
diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'], 
96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, 
AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].use_dora['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | 
| +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, 
self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].processor, 139846067510384) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['9']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['9']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['9']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=10 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[10] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[10] == '10' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
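
The guards in this to_q subtree (the lora_A block above, plus the lora_B, use_dora and _active_adapter blocks that follow) all point at the multi-adapter dispatch inside peft's LoRA Linear forward, peft/tuners/lora/layer.py:497-509 as quoted in the guard comments: EQUALS_MATCH pins the adapter keys 'default_0'/'default_1' and the single active adapter 'default_0', TENSOR_MATCH pins the rank-16 torch.bfloat16 lora_A/lora_B weights on cuda:0, the ID_MATCH on each bias corresponds to the bias-free LoRA Linears (bias is None), and the DICT_LENGTH checks confirm nothing else hangs off the wrapper. Below is a minimal sketch of that guarded branch, not the actual peft implementation; sizes, adapter names and flag values are copied from the guards above, and the real layer also carries a second, inactive 'default_1' adapter and a real dropout module.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Simplified stand-in for the peft lora.Linear wrapper around attn.to_q;
    # only the code path the guards above specialize on is reproduced.
    def __init__(self, in_features=3072, out_features=3072, r=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, bias=True)  # frozen base projection (requires_grad=False in the guards)
        # ModuleDicts keyed by adapter name; the EQUALS_MATCH guards pin these keys.
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False)})   # weight [16, 3072]
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False)})  # weight [3072, 16]
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # placeholder for the dropout module
        self.scaling = {"default_0": scaling}   # scaling['default_0'] == 1.0 per the EQUALS_MATCH on to_k/to_v
        self.use_dora = {"default_0": False}    # ID_MATCH: DoRA disabled
        self.active_adapters = ["default_0"]    # _active_adapter == ['default_0']
        self.merged_adapters = []               # LENGTH_CHECK: nothing merged
        self.disable_adapters = False           # ID_MATCH: adapters enabled

    def forward(self, x):
        result = self.base_layer(x)                               # layer.py:497
        if self.disable_adapters or self.merged_adapters:
            return result
        for active_adapter in self.active_adapters:               # layer.py:499
            if active_adapter not in self.lora_A.keys():          # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                  # layer.py:503
            dropout = self.lora_dropout[active_adapter]           # layer.py:504
            scaling = self.scaling[active_adapter]                # layer.py:505
            x = x.to(lora_A.weight.dtype)                         # layer.py:506
            if not self.use_dora[active_adapter]:                 # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Because the adapter keys, scaling values, use_dora flags, bias objects and weight shapes/dtypes are all read in Python while Dynamo traces this branch, each of them becomes its own guard, and the same subtree is emitted again for to_k and to_v below.
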
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
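
The to_k subtree here repeats, check for check, the pattern just shown for to_q (and the to_v subtree below repeats it again): adapter-key EQUALS_MATCHes, TENSOR_MATCHes on the frozen bf16 base weight and the rank-16 lora_A/lora_B weights, ID_MATCHes pinning use_dora and disable_adapters to False, and emptiness checks on every hook dict. Every LoRA-wrapped projection in every block therefore contributes an essentially identical block of guards, which is why this dump runs so long, and the [0/2] in the log prefix shows this is already a later recompilation of frame 0. If exact per-adapter dispatch is not needed at inference time, one way to shrink both the guard surface and the traced branch is to fuse the adapters into the base weights before compiling, so the path at peft/tuners/lora/layer.py:509 is never entered. A hedged sketch only, not something this log shows being done; `pipe` stands for an already-constructed diffusers Flux pipeline with the two LoRAs loaded:

import torch

# `pipe` is hypothetical: a diffusers pipeline whose transformer carries the
# 'default_0'/'default_1' adapters seen in the guards above.
pipe.fuse_lora()  # fold lora_B @ lora_A * scaling into the base Linear weights

# Frame 0 is already at recompilation [0/2]; raising Dynamo's cache limit keeps
# further specializations from silently falling back to eager.
torch._dynamo.config.cache_size_limit = 16

pipe.transformer = torch.compile(pipe.transformer)

After fusing, most of the guards above that walk lora_A/lora_B, scaling and use_dora for every projection should no longer be generated, leaving mainly the cheaper merged/disabled checks around a plain base Linear call.
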
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].processor, 139846067511152) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['10']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['10']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['10']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=11 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[11] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[11] == '11' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'], 
accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].processor, 139846066291424) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['11']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['11']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['11']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['11']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=12 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[12] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[12] == '12' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 
'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
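
Throughout this block the adapter dictionaries (scaling, lora_dropout, lora_A, lora_B, use_dora) hold exactly two keys, 'default_0' and 'default_1', while only 'default_0' is active with a scaling of 1.0. One hypothetical way such a state arises in diffusers is loading two LoRAs without explicit adapter names and activating only the first; the model id and file paths below are placeholders, not values recovered from this log:

import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to("cuda")
pipe.load_lora_weights("path/to/first_lora.safetensors")    # auto-named adapter 'default_0'
pipe.load_lora_weights("path/to/second_lora.safetensors")   # auto-named adapter 'default_1'
pipe.set_adapters(["default_0"], adapter_weights=[1.0])     # matches _active_adapter == ['default_0'], scaling 1.0
pipe.transformer = torch.compile(pipe.transformer)
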
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 
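
Each check_tensor / TENSOR_MATCH entry above freezes a parameter's class, dispatch keys, dtype, device, requires_grad, size and stride (for the lora_A weight just guarded: bfloat16 on cuda:0, requires_grad=True, size [16, 3072]); if any of those change between calls, the guard fails and Dynamo recompiles. A small standalone illustration of that behavior, unrelated to this run (guard dumps like the one above come from enabling guard logging, e.g. TORCH_LOGS="guards"):

import torch

@torch.compile
def project(linear, x):
    return linear(x)

linear = torch.nn.Linear(3072, 16, bias=False)   # same shape as the lora_A weight guarded above
x = torch.randn(4, 3072)
project(linear, x)                    # first call: compile and install guards, incl. TENSOR_MATCH on the weight
linear.weight.requires_grad_(False)   # requires_grad is one of the guarded tensor properties
project(linear, x)                    # the failed guard forces a recompile on this call
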
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
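
Taken together, the proj_mlp guards above trace PEFT's LoRA Linear forward: the frozen bfloat16 base Linear ([12288, 3072] plus bias), the trainable 'default_0' pair lora_A ([16, 3072]) and lora_B ([12288, 16]) with no biases, a scaling of 1.0, and the use_dora / merged_adapters / disable_adapters bookkeeping checked around them. The sketch below paraphrases that code path from the source comments quoted in the guards (peft/tuners/lora/layer.py:488-509); it is a simplified single-adapter stand-in, not the PEFT implementation:

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Simplified stand-in for the guarded peft lora.Linear."""

    def __init__(self, base: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base                                   # frozen base weight/bias (TENSOR_MATCH above)
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}                            # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}                         # ID_MATCH on use_dora['default_0']
        self.active_adapters = [adapter]                         # _active_adapter == ['default_0']
        self.disable_adapters = False                            # ID_MATCH checked at layer.py:488

    def forward(self, x):
        if self.disable_adapters:                                # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                              # layer.py:497
        for active_adapter in self.active_adapters:              # layer.py:499
            if active_adapter not in self.lora_A.keys():         # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                 # layer.py:503
            dropout = self.lora_dropout[active_adapter]          # layer.py:504
            scaling = self.scaling[active_adapter]               # layer.py:505
            x = x.to(lora_A.weight.dtype)                        # layer.py:506
            if not self.use_dora[active_adapter]:                # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
        return result

layer = LoraLinearSketch(nn.Linear(3072, 12288))
out = layer(torch.randn(2, 3072))
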
| | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].scaling['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
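
The proj_out guards just above show its base Linear mapping 15360 features back down to 3072, i.e. 3072 + 12288: the attention output and the GELU(tanh)-activated MLP hidden states are projected jointly. A sketch of the block-level data flow assembled from the source comments quoted in the guards (transformer_flux.py:88, :89, :91, :98); the concatenation and residual steps, and the attention call's argument names, are assumptions that make the 15360 input width add up:

import torch

def single_block_flow(block, hidden_states, temb, image_rotary_emb=None):
    residual = hidden_states                                                # assumed
    norm_hidden_states, gate = block.norm(hidden_states, emb=temb)          # transformer_flux.py:88
    mlp_hidden_states = block.act_mlp(block.proj_mlp(norm_hidden_states))   # :89, 3072 -> 12288, GELU(tanh)
    attn_output = block.attn(hidden_states=norm_hidden_states,              # :91 (argument names assumed)
                             image_rotary_emb=image_rotary_emb)
    hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=-1)     # assumed: 3072 + 12288 = 15360
    hidden_states = gate * block.proj_out(hidden_states)                    # :98, 15360 -> 3072
    return residual + hidden_states                                         # assumed
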
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._backward_hooks # hidden_states = gate * 
self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].processor, 139846066292288) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['12']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['12']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['12']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=13 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[13] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[13] == '13' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 
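
The TENSOR_MATCH guards above pin the proj_mlp adapter pair to lora_A.weight of size [16, 3072] and lora_B.weight of size [12288, 16], i.e. a rank-16 LoRA wrapped around a 3072 -> 12288 projection, stored contiguously in bfloat16. A minimal sketch of how such a pair reproduces exactly those shapes; the variable names and the batch/sequence sizes below are hypothetical, only the dimensions and dtype come from the guards:

```python
import torch
from torch import nn

# Hypothetical stand-ins for the guarded proj_mlp adapter pair: the base
# projection maps 3072 -> 12288 features and the LoRA rank is 16, which is
# exactly what the guarded weight shapes [16, 3072] and [12288, 16] encode.
in_features, out_features, rank = 3072, 12288, 16

lora_A = nn.Linear(in_features, rank, bias=False, dtype=torch.bfloat16)   # weight: [16, 3072]
lora_B = nn.Linear(rank, out_features, bias=False, dtype=torch.bfloat16)  # weight: [12288, 16]

assert tuple(lora_A.weight.shape) == (rank, in_features)
assert tuple(lora_B.weight.shape) == (out_features, rank)
assert lora_A.weight.stride() == (in_features, 1)  # contiguous, as in the guard

# Low-rank update that gets added onto the frozen base-layer output in the PEFT forward:
x = torch.randn(2, 77, in_features, dtype=torch.bfloat16)
delta = lora_B(lora_A(x))  # [2, 77, 12288], same shape as the base projection's output
```
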
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 
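
Every source comment attached to the guards in this subtree quotes the same PEFT forward path (peft/tuners/lora/layer.py:488-509 in this environment). As a reading aid, here is that control flow reconstructed from the quoted statements only; merged-weight handling, dropout configuration and the DoRA branch are elided, so treat it as an outline rather than PEFT's exact implementation:

```python
# Outline of the guarded LoRA linear forward, reconstructed from the statements
# quoted in the guard comments above (not verbatim PEFT code).
def lora_linear_forward(self, x, *args, **kwargs):
    if self.disable_adapters:                         # layer.py:488
        # (unmerge handling elided)
        return self.base_layer(x, *args, **kwargs)

    result = self.base_layer(x, *args, **kwargs)      # layer.py:497
    for active_adapter in self.active_adapters:       # layer.py:499
        if active_adapter not in self.lora_A.keys():  # layer.py:500
            continue
        lora_A = self.lora_A[active_adapter]
        lora_B = self.lora_B[active_adapter]          # layer.py:503
        dropout = self.lora_dropout[active_adapter]   # layer.py:504
        scaling = self.scaling[active_adapter]        # layer.py:505
        x = x.to(lora_A.weight.dtype)                 # layer.py:506
        if not self.use_dora[active_adapter]:         # layer.py:508
            result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        else:
            ...  # DoRA branch elided
    return result
```

Each DICT_LENGTH, TYPE_MATCH and EQUALS_MATCH entry above pins one of the attribute lookups in this path (active adapter name, use_dora flag, scaling value, adapter dict sizes), which is why a single LoRA layer contributes so many guards.
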
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling 
= self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
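
Each TENSOR_MATCH records the static signature the graph was specialized on: parameter type, dispatch keys, dtype, device index, requires_grad, size and stride. The helper below is only a user-level illustration of what the guard on proj_out's frozen base-layer weight pins down; it is not Dynamo's check_tensor implementation, which runs in C++ and also covers dispatch keys and tensor aliasing:

```python
import torch

def illustrative_tensor_guard(t: torch.Tensor) -> bool:
    """Rough approximation of the properties the TENSOR_MATCH guard above
    records for proj_out.base_layer.weight: a frozen bf16 Parameter on
    cuda:0 with shape [3072, 15360] and contiguous strides."""
    return (
        isinstance(t, torch.nn.Parameter)
        and t.dtype == torch.bfloat16
        and t.device == torch.device("cuda", 0)
        and t.requires_grad is False
        and tuple(t.shape) == (3072, 15360)
        and tuple(t.stride()) == (15360, 1)
    )
```
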
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- 
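
Taken together, the guard comments in this block quote three statements of diffusers' FluxSingleTransformerBlock.forward (transformer_flux.py:89, 91 and 98): the MLP branch through proj_mlp and a tanh-approximated GELU, the attention call, and the gated output projection. The sketch below stitches those quoted lines into one body; the attention argument list and the concatenation of the two branches are assumptions (the latter suggested by proj_out's guarded input width of 15360 = 3072 + 12288), and the normalization/residual handling of the real block is elided:

```python
import torch

# Outline of the guarded single-block body, pieced together from the statements
# the guard comments quote; not the full diffusers implementation.
def single_block_body(self, hidden_states, norm_hidden_states, gate, image_rotary_emb=None):
    # MLP branch (transformer_flux.py:89); act_mlp is GELU(approximate='tanh')
    mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))

    # Attention branch (transformer_flux.py:91); argument list assumed, the log elides it
    attn_output = self.attn(hidden_states=norm_hidden_states, image_rotary_emb=image_rotary_emb)

    # Assumed: the two branches are concatenated before projection, consistent
    # with proj_out's guarded base weight of [3072, 15360] = 3072 + 12288
    hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=-1)

    # Gated output projection back to 3072 channels (transformer_flux.py:98)
    hidden_states = gate * self.proj_out(hidden_states)
    return hidden_states
```
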
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
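
The norm_q / norm_k guards pin eps == 1e-06 and a learnable weight of size [128], which matches head_dim = inner_dim // attn.heads = 3072 // 24 = 128 from the quoted attention_processor line. The quoted normalization.py statements describe an RMS-norm; a minimal sketch of that computation follows, with the variance term assumed to be the usual mean of squares (only the rsqrt scaling and the optional weight are actually quoted in the log, and the real diffusers RMSNorm also handles dtype casting, elided here):

```python
from typing import Optional
import torch

def rms_norm(hidden_states: torch.Tensor,
             weight: Optional[torch.Tensor],
             eps: float = 1e-6) -> torch.Tensor:
    # Scale by the reciprocal RMS over the last dim (normalization.py:428);
    # the mean-of-squares variance is assumed, it is not quoted in the log.
    variance = hidden_states.pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)
    # Optional learnable per-head-dim gain, size [128] here (normalization.py:430)
    if weight is not None:
        hidden_states = hidden_states * weight
    return hidden_states
```
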
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].processor, 139846066293056) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['13']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['13']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['13']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=14 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[14] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[14] == '14' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
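Each TENSOR_MATCH guard above pins the complete metadata of one parameter: its Python type (Parameter), dispatch key set, dtype (torch.bfloat16), device index, requires_grad flag, size and stride; NO_TENSOR_ALIASING additionally asserts that the guarded tensors do not alias one another. The snippet below is only an illustration of what such a check covers for the norm.linear base_layer weight recorded here (size [9216, 3072], stride [3072, 1]); it is not Dynamo's actual check_tensor, which runs in C++ inside torch._dynamo. It assumes a CUDA device, as in this log; casting the model to another dtype, moving it to another device, or swapping in a weight of a different shape would fail the guard and trigger a recompile of this frame.

    import torch
    from torch.nn import Parameter

    def tensor_guard_holds(t, dtype=torch.bfloat16, device=torch.device("cuda", 0),
                           requires_grad=False, size=(9216, 3072), stride=(3072, 1)):
        # Illustration only: approximates the properties the TENSOR_MATCH guard
        # records for the base_layer weight above. The real guard also covers
        # the dispatch key set and aliasing (NO_TENSOR_ALIASING).
        return (isinstance(t, Parameter)
                and t.dtype == dtype
                and t.device == device
                and t.requires_grad == requires_grad
                and tuple(t.shape) == size
                and t.stride() == stride)

    # A weight with the recorded metadata passes; the same weight cast to
    # float16 (or reshaped, moved, or made trainable) would not.
    w = Parameter(torch.empty(9216, 3072, dtype=torch.bfloat16, device="cuda:0"),
                  requires_grad=False)
    assert tensor_guard_holds(w)
    assert not tensor_guard_holds(Parameter(w.to(torch.float16), requires_grad=False))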
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
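The source comments attached to the guards in this block all point into one forward path, peft/tuners/lora/layer.py:488-509 in this environment: the disable_adapters and merged checks, the base_layer call, the loop over active_adapters (only 'default_0' is active, with scaling == 1.0, although a second adapter 'default_1' is also installed), and the non-DoRA branch result = result + lora_B(lora_A(dropout(x))) * scaling. The sketch below is a paraphrase of that path reconstructed from the quoted lines, not the actual PEFT implementation; the attribute names (base_layer, lora_A, lora_B, lora_dropout, scaling, use_dora) follow the guard sources, the shapes match the guarded 3072->9216 linear with rank-16 adapters, and plain float32 on CPU is used so the sketch runs anywhere (the guarded weights themselves are bfloat16 on CUDA). Every attribute read on this path appears above as its own guard, which is why a LoRA-patched Flux transformer accumulates this many guards per compiled frame.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Paraphrase of the forward path quoted in the guard comments
        # (peft/tuners/lora/layer.py:488-509 in this environment); not the
        # real PEFT class. Attribute names mirror the guard sources.
        def __init__(self, base_layer, r=16, adapter="default_0"):
            super().__init__()
            self.base_layer = base_layer
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
            self.scaling = {adapter: 1.0}        # scaling['default_0'] == 1.0 in the guards
            self.use_dora = {adapter: False}     # use_dora['default_0'] guarded False
            self.active_adapters = [adapter]     # only 'default_0' is active here
            self.disable_adapters = False
            self.merged_adapters = []

        def forward(self, x):
            if self.disable_adapters or self.merged_adapters:
                return self.base_layer(x)        # simplified; real code also unmerges
            result = self.base_layer(x)
            for active_adapter in self.active_adapters:
                if active_adapter not in self.lora_A:
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]
                dropout = self.lora_dropout[active_adapter]
                scaling = self.scaling[active_adapter]
                x_cast = x.to(lora_A.weight.dtype)
                if not self.use_dora[active_adapter]:
                    result = result + lora_B(lora_A(dropout(x_cast))) * scaling
            return result

    # Matches the guarded shapes: a 3072->9216 base layer with rank-16 adapters.
    layer = LoraLinearSketch(nn.Linear(3072, 9216), r=16)
    out = layer(torch.randn(2, 3072))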
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].scaling['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'], 
accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # 
peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in 
__call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor 
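[editor note] The guard entries in this [0/2] block all originate from the PEFT LoRA Linear forward quoted in the trailing source comments (peft/tuners/lora/layer.py:488-509) as it wraps the Flux attention projections (attn.to_q / attn.to_k / attn.to_v in diffusers/src/diffusers/models/attention_processor.py). Below is a minimal, self-contained Python sketch of that dispatch pattern, reconstructed only from those quoted lines and the shapes reported by the TENSOR_MATCH guards; ToyLoraLinear and its defaults are illustrative stand-ins, not the actual peft implementation (which also handles merged adapters, DoRA, and embedding adapters).

import torch
import torch.nn as nn

class ToyLoraLinear(nn.Module):
    # Illustrative stand-in for the LoRA-wrapped Linear whose attributes are guarded above:
    # per-adapter dicts keyed by 'default_0', scaling == 1.0, use_dora == False, and the
    # update result = result + lora_B(lora_A(dropout(x))) * scaling (layer.py:509).
    def __init__(self, base: nn.Linear, rank: int = 16, adapter: str = "default_0"):
        super().__init__()
        for p in base.parameters():
            p.requires_grad_(False)  # base weights are guarded with requires_grad=False above
        self.base_layer = base
        self.active_adapters = [adapter]
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, rank, bias=False)})   # weight [rank, in]
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(rank, base.out_features, bias=False)})  # weight [out, rank]
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}      # cf. EQUALS_MATCH ... scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}   # cf. ID_MATCH on use_dora['default_0']

    def forward(self, x):
        result = self.base_layer(x)                      # layer.py:497
        for name in self.active_adapters:                # layer.py:499
            if name not in self.lora_A:                  # layer.py:500
                continue
            lora_A, lora_B = self.lora_A[name], self.lora_B[name]   # layer.py:503
            dropout, scaling = self.lora_dropout[name], self.scaling[name]
            x = x.to(lora_A.weight.dtype)                # layer.py:506
            if not self.use_dora[name]:                  # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

layer = ToyLoraLinear(nn.Linear(3072, 3072), rank=16)
print(layer(torch.randn(2, 3072)).shape)  # torch.Size([2, 3072])

For a rank-16 adapter on a 3072-wide projection this yields exactly the shapes the guards pin: lora_A.weight is [16, 3072] and lora_B.weight is [3072, 16], both torch.bfloat16 on device 0 with requires_grad=True in the run above. Because TENSOR_MATCH fixes dtype, device, shape, and stride, and EQUALS_MATCH fixes the active adapter name ('default_0') and scaling (1.0), changing the LoRA rank or dtype, enabling DoRA, or switching the active adapter will generally fail these guards and force Dynamo to recompile this frame. [end editor note]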
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], 
stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].processor, 139846066293824) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['14']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['14']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['14']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=15 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[15] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[15] == '15' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) 
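The guard records up to this point pin down, attribute by attribute, the PEFT LoRA wrapper around block 15's proj_mlp: adapter key 'default_0', rank-16 lora_A/lora_B weights, scaling == 1.0, use_dora['default_0'] is False, no merged adapters, and adapters not disabled. For readers following the log, below is a minimal Python sketch of the LoRA forward path these guards specialize on; the logic follows the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509), but the class itself is illustrative and simplified, not the actual PEFT lora.Linear implementation, and the Identity dropout placeholder is an assumption.

import torch
import torch.nn as nn

# Simplified, illustrative sketch of the LoRA-wrapped Linear whose attributes the
# guards above check. Not the real peft class; comments point at the matching guards.
class LoraLinearSketch(nn.Module):
    def __init__(self, base_layer: nn.Linear, r: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base_layer  # frozen base weight (bf16, requires_grad=False in this log)
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # placeholder; assumed no dropout
        self.scaling = {"default_0": scaling}          # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {"default_0": False}           # ID_MATCH: "if not self.use_dora[active_adapter]:"
        self.merged_adapters = []                      # LENGTH_CHECK: not merged_adapters
        self._disable_adapters = False                 # ID_MATCH: "if self.disable_adapters:"
        self._active_adapter = ["default_0"]           # EQUALS_MATCH: _active_adapter[0] == 'default_0'

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self._disable_adapters or self.merged_adapters:
            return self.base_layer(x)
        result = self.base_layer(x)                    # "result = self.base_layer(x, *args, **kwargs)"
        for adapter in self._active_adapter:           # "for active_adapter in self.active_adapters:"
            lora_A = self.lora_A[adapter]
            lora_B = self.lora_B[adapter]
            dropout = self.lora_dropout[adapter]
            scaling = self.scaling[adapter]
            if not self.use_dora[adapter]:
                x_cast = dropout(x).to(lora_A.weight.dtype)   # "x = x.to(lora_A.weight.dtype)"
                result = result + lora_B(lora_A(x_cast)) * scaling
        return result

# Shapes as captured by TENSOR_MATCH for proj_mlp: base [12288, 3072], lora_A [16, 3072], lora_B [12288, 16].
sketch = LoraLinearSketch(nn.Linear(3072, 12288), r=16, scaling=1.0)
out = sketch(torch.randn(2, 3072))

Each constant checked above (adapter keys, scaling value, dict lengths, tensor dtypes/shapes/strides) becomes a guard on the compiled graph, so changing the active adapter, LoRA rank, or scaling after compilation can fail these guards and trigger recompilation.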
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
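
The guards in this stretch of the dump all instrument the same PEFT LoRA Linear forward path: the source comments attached to each guard point at peft/tuners/lora/layer.py (base_layer, lora_A, lora_B, lora_dropout, scaling, use_dora, merged_adapters, active_adapters) and at F.linear in nn/modules/linear.py. As a rough orientation aid only, below is a minimal sketch of that control flow, reconstructed from the quoted lines rather than copied from peft; the class name, adapter name, and default sizes are placeholders. Every attribute read in the sketch is what surfaces in the trace as a DICT_LENGTH, LENGTH_CHECK, EQUALS_MATCH, ID_MATCH, or TENSOR_MATCH guard.

    # Sketch of the LoRA Linear forward these guards were recorded against.
    # Reconstructed from the source lines quoted in the guard comments
    # (peft/tuners/lora/layer.py:488-509); NOT the exact peft implementation.
    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
            super().__init__()
            self.base_layer = base_layer                       # guarded via _modules['base_layer'] (TENSOR_MATCH on weight/bias)
            # bias=False mirrors the ID_MATCH guards pinning lora_{A,B} _parameters['bias'] to a fixed object id (no bias tensor)
            self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
            self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
            self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
            self.scaling = {adapter: 1.0}                      # EQUALS_MATCH: scaling['default_0'] == 1.0
            self.use_dora = {adapter: False}                   # ID_MATCH on the bool singleton (traced branch implies False)
            self.merged_adapters = []                          # LENGTH_CHECK: not merged_adapters
            self._active_adapter = [adapter]                   # LENGTH_CHECK == 1, EQUALS_MATCH == 'default_0'
            self.disable_adapters = False                      # ID_MATCH on _disable_adapters

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            result = self.base_layer(x)                        # result = self.base_layer(x, *args, **kwargs)
            if self.disable_adapters:
                return result
            for active_adapter in self._active_adapter:        # for active_adapter in self.active_adapters:
                if active_adapter not in self.lora_A.keys():
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]
                dropout = self.lora_dropout[active_adapter]
                scaling = self.scaling[active_adapter]
                x = x.to(lora_A.weight.dtype)                  # TENSOR_MATCH on lora_A weight (here [16, 3072], bfloat16, CUDA)
                if not self.use_dora[active_adapter]:
                    result = result + lora_B(lora_A(dropout(x))) * scaling
            return result

Because Dynamo records a guard for each of these reads, anything that changes them between calls (for example loading an additional adapter such as the 'default_1' keys checked in these entries, merging adapters, or editing a scaling value) fails the guard set and forces a fresh compile, which lines up with the [0/2] tag on these log lines (frame 0, recompile index 2).
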
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
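The guard subtree above for to_k (and the matching to_v subtree that follows) walks exactly the attributes read on the code path quoted in the trailing source comments, peft/tuners/lora/layer.py:488-509 plus nn/modules/linear.py:125. As a hedged reading aid, the sketch below reconstructs a LoRA-wrapped Linear with the same attribute names and with the shapes, dtypes, and flags the guards pin down: a frozen bf16 [3072, 3072] base projection, rank-16 lora_A [16, 3072] and lora_B [3072, 16] without bias, scaling 1.0, use_dora False, no merged adapters, and a single active adapter 'default_0'. It is a simplified stand-in, not the verbatim peft implementation (peft exposes disable_adapters/active_adapters as properties, also keeps the second loaded adapter 'default_1' seen in the guards, and its lora_dropout entries are whatever dropout module the adapter was configured with, where this sketch just uses Identity).

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Simplified stand-in for the peft LoRA Linear guarded above; attribute
    # names mirror the source comments in the log, values mirror the guards.
    def __init__(self, in_features=3072, out_features=3072, rank=16,
                 adapter="default_0", dtype=torch.bfloat16):
        super().__init__()
        # base_layer: TENSOR_MATCH guards show a frozen bf16 Linear,
        # weight [3072, 3072] / bias [3072], requires_grad=False.
        self.base_layer = nn.Linear(in_features, out_features, dtype=dtype)
        self.base_layer.requires_grad_(False)
        # lora_A / lora_B: trainable rank-16 projections, bias=False
        # (the 'bias' guards are ID_MATCH checks, presumably against None).
        self.lora_A = nn.ModuleDict(
            {adapter: nn.Linear(in_features, rank, bias=False, dtype=dtype)})
        self.lora_B = nn.ModuleDict(
            {adapter: nn.Linear(rank, out_features, bias=False, dtype=dtype)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}       # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}    # ID_MATCH on use_dora['default_0']
        self.merged_adapters = []           # LENGTH_CHECK: not merged_adapters
        self.disable_adapters = False       # ID_MATCH on _disable_adapters
        self.active_adapters = [adapter]    # EQUALS_MATCH: _active_adapter[0] == 'default_0'

    def forward(self, x):
        if self.disable_adapters:                        # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                      # layer.py:497
        for active_adapter in self.active_adapters:      # layer.py:499
            if active_adapter not in self.lora_A.keys(): # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]         # layer.py:503
            dropout = self.lora_dropout[active_adapter]  # layer.py:504
            scaling = self.scaling[active_adapter]       # layer.py:505
            x = x.to(lora_A.weight.dtype)                # layer.py:506
            if not self.use_dora[active_adapter]:        # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Every attribute read on this path shows up above as a DICT_LENGTH, TYPE_MATCH, EQUALS_MATCH, TENSOR_MATCH or ID_MATCH guard, so changing any of them after compilation (activating 'default_1', merging adapters, toggling use_dora) fails the guard check and forces Dynamo to recompile the frame.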
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
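Every record in this dump carries the same verbose-level prefix (timestamp, id 1882310, call site torch/_dynamo/guards.py:2263, compile id [0/2] — frame 0, apparently its third compilation — and the [__guards] artifact tag); only the tree fragment after the prefix differs from record to record. Below is a minimal sketch of how such a TREE_GUARD_MANAGER dump is produced, assuming PyTorch 2.x where torch._logging.set_logs exposes the guards and recompiles artifacts; the toy model and shapes are placeholders, not the Flux/LoRA setup from this log.

import torch
import torch.nn as nn

# Hedged sketch: enable the logging artifacts that produce TREE_GUARD_MANAGER
# dumps like this one. Roughly equivalent to running with TORCH_LOGS="guards,recompiles".
torch._logging.set_logs(guards=True, recompiles=True)

model = nn.Sequential(nn.Linear(8, 8), nn.GELU(), nn.Linear(8, 8))
compiled = torch.compile(model)

compiled(torch.randn(2, 8))   # first call compiles frame 0 -> guard tree tagged [0/0]
compiled(torch.randn(5, 8))   # a new batch size typically fails a size guard -> recompile tagged [0/1]

The dump grows with the size of the guarded module tree: every LoRA-wrapped projection (to_q, to_k, to_v, ...) in every block contributes a subtree like the to_k and to_v ones shown here, and all of those attribute checks run before each call into the compiled frame.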
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].processor, 139846066294592) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['15']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['15']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['15']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=16 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[16] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[16] == '16' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'], 
accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].processor, 139846066090624) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['16']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['16']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['16']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['16']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=17 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[17] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[17] == '17' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 
'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].scaling['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._backward_hooks # hidden_states = gate * 
self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].processor, 139846066091392) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['17']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['17']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['17']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=18 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[18] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[18] == '18' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) 
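The [__guards] records above and below all specialize on the same PEFT LoRA-wrapped Linear layers: the guard comments repeatedly cite peft/tuners/lora/layer.py lines 488-509, and the TENSOR_MATCH entries pin the exact layer geometry (for single_transformer_blocks.17.attn.to_v a 3072x3072 bfloat16 base weight with lora_A [16, 3072] and lora_B [3072, 16], i.e. rank 16; for single_transformer_blocks.18.norm.linear a 9216x3072 base with lora_B [9216, 16]) together with the adapter bookkeeping (adapters 'default_0' and 'default_1', only 'default_0' active, scaling 1.0, use_dora False, merged_adapters empty, disable_adapters False). What follows is a minimal sketch of the code path those guard comments reference, written only to make the guard structure readable; it is not the actual peft implementation, and the class name, constructor arguments, and defaults are illustrative.

import torch
import torch.nn as nn

# Minimal sketch (NOT the real peft code) of the LoRA Linear forward path that the
# guard comments cite (peft/tuners/lora/layer.py:488-509). Shapes, dtypes, adapter
# names, and flag values mirror the TENSOR_MATCH / EQUALS_MATCH / ID_MATCH guards
# recorded above for single_transformer_blocks.17.attn.to_v.
class LoraLinearSketch(nn.Module):
    def __init__(self, in_features=3072, out_features=3072, rank=16,
                 adapters=("default_0", "default_1"), dtype=torch.bfloat16):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features, dtype=dtype)
        self.lora_A = nn.ModuleDict(
            {name: nn.Linear(in_features, rank, bias=False, dtype=dtype) for name in adapters}
        )
        self.lora_B = nn.ModuleDict(
            {name: nn.Linear(rank, out_features, bias=False, dtype=dtype) for name in adapters}
        )
        self.lora_dropout = nn.ModuleDict({name: nn.Identity() for name in adapters})
        self.scaling = {name: 1.0 for name in adapters}      # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {name: False for name in adapters}   # ID_MATCH on False in the guards
        self.active_adapters = ["default_0"]                 # EQUALS_MATCH: _active_adapter[0] == 'default_0'
        self.merged_adapters = []                            # LENGTH_CHECK: not merged_adapters
        self.disable_adapters = False                        # ID_MATCH on False

    def forward(self, x):
        result = self.base_layer(x)                          # layer.py:497
        if self.disable_adapters:                            # layer.py:488 (guarded False here)
            return result
        for active_adapter in self.active_adapters:          # layer.py:499
            if active_adapter not in self.lora_A.keys():     # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]             # layer.py:503
            dropout = self.lora_dropout[active_adapter]      # layer.py:504
            scaling = self.scaling[active_adapter]           # layer.py:505
            x = x.to(lora_A.weight.dtype)                    # layer.py:506
            if not self.use_dora[active_adapter]:            # layer.py:508 (guarded False)
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Because every one of these attributes is guarded (adapter dict keys and lengths, scaling values, the active adapter list, and each weight's shape, dtype, device, and requires_grad), changing any of them at runtime - loading another adapter, switching or merging adapters, or altering the LoRA scale - invalidates the guards and forces a recompile of the [0/2] graph. Verbose guard dumps like the ones in this log are typically what you get with guard logging enabled, e.g. something like TORCH_LOGS="guards".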
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling 
= self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
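
The guard source comments in this part of the dump repeatedly quote the PEFT LoRA forward path (peft/tuners/lora/layer.py:488-509) as it is traced through attn.to_q / to_k / to_v of single_transformer_blocks.18. The sketch below is only an illustration of that traced path, not the actual peft.tuners.lora.layer.Linear implementation: the feature size, rank, dtype and scaling are read off the TENSOR_MATCH / EQUALS_MATCH guards above (base weight [3072, 3072] and bias [3072], lora_A weight [16, 3072], lora_B weight [3072, 16], torch.bfloat16, scaling['default_0'] == 1.0, active adapter 'default_0'); the names `features`, `rank`, `lora_linear_forward` and the nn.Identity dropout placeholder are assumptions introduced here, since the dump only ID-/type-checks those objects.

    import torch
    import torch.nn as nn

    # Sizes and dtype assumed from the TENSOR_MATCH guards in this dump.
    features, rank = 3072, 16

    base_layer = nn.Linear(features, features, bias=True, dtype=torch.bfloat16)  # weight [3072, 3072], bias [3072]
    lora_A = nn.Linear(features, rank, bias=False, dtype=torch.bfloat16)         # weight [16, 3072]; bias only ID-checked in the dump, assumed absent
    lora_B = nn.Linear(rank, features, bias=False, dtype=torch.bfloat16)         # weight [3072, 16]; bias only ID-checked in the dump, assumed absent
    lora_dropout = nn.Identity()      # placeholder: the guard pins the dropout module's type id, not its configuration
    scaling = {"default_0": 1.0}      # EQUALS_MATCH: scaling['default_0'] == 1.0
    active_adapters = ["default_0"]   # EQUALS_MATCH: _active_adapter[0] == 'default_0'

    def lora_linear_forward(x: torch.Tensor) -> torch.Tensor:
        # result = self.base_layer(x, *args, **kwargs)              (layer.py:497, quoted in the guards)
        result = base_layer(x)
        for active_adapter in active_adapters:                      # (layer.py:499)
            x = x.to(lora_A.weight.dtype)                           # (layer.py:506)
            # Non-DoRA branch; the guards quote layer.py:509, which sits under
            # "if not self.use_dora[active_adapter]:" (layer.py:508).
            result = result + lora_B(lora_A(lora_dropout(x))) * scaling[active_adapter]  # (layer.py:509)
        return result

    out = lora_linear_forward(torch.randn(2, features, dtype=torch.bfloat16))

Every attribute the sketch reads (active adapter name, scaling value, parameter shapes/dtype, the use_dora and merged_adapters flags) corresponds to one of the guard entries above, which is why a single LoRA-wrapped Linear contributes this many guards per projection and per block.
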
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].processor, 139846066092160) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['18']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['18']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['18']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=19 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[19] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[19] == '19' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].scaling['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'], 
accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # 
peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in 
__call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], 
stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].processor, 139846066092928) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['19']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['19']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['19']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=20 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[20] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[20] == '20' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
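The TENSOR_MATCH entries above fix the exact tensor metadata the compiled graph assumes for the base nn.Linear inside single_transformer_blocks[20].norm.linear: a frozen bfloat16 projection of shape [9216, 3072] (plus a [9216] bias) on cuda:0 with contiguous strides. A minimal sketch, with assumed variable names, of a layer that would satisfy exactly those checks:

```python
import torch
from torch import nn

# Hypothetical stand-in for the guarded base layer of
# single_transformer_blocks[20].norm.linear; the names are assumptions, only the
# metadata asserted below is taken from the TENSOR_MATCH guards.
base = nn.Linear(3072, 9216, bias=True, device="cuda", dtype=torch.bfloat16)
base.requires_grad_(False)  # guards record requires_grad=False for weight and bias

assert base.weight.dtype == torch.bfloat16
assert base.weight.device == torch.device("cuda", 0)
assert tuple(base.weight.shape) == (9216, 3072) and base.weight.stride() == (3072, 1)
assert tuple(base.bias.shape) == (9216,)
# Changing any of these (dtype, device, shape, stride, requires_grad) would fail
# the guard and force a re-trace of this frame.
```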
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
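The source comments attached to these guards (peft/tuners/lora/layer.py:488-509) describe the LoRA path Dynamo traced for this linear: the base output plus lora_B(lora_A(dropout(x))) scaled by `scaling`, with rank-16 adapters for 'default_0' (lora_A weight [16, 3072], lora_B weight [9216, 16], scaling 1.0, use_dora False). A condensed, hedged restatement of that control flow, not the PEFT implementation itself:

```python
import torch
from torch import nn

class LoraLinearSketch(nn.Module):
    """Sketch of the traced LoRA path; real PEFT covers many more branches."""

    def __init__(self, base: nn.Linear, rank: int = 16, scaling: float = 1.0):
        super().__init__()
        self.base_layer = base
        # Adapter shapes mirror the guarded 'default_0': lora_A [rank, in], lora_B [out, rank].
        self.lora_A = nn.Linear(base.in_features, rank, bias=False)   # bias guarded as absent
        self.lora_B = nn.Linear(rank, base.out_features, bias=False)  # bias guarded as absent
        self.lora_dropout = nn.Identity()  # placeholder for the guarded lora_dropout['default_0']
        self.scaling = scaling             # EQUALS_MATCH pins this to 1.0

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = self.base_layer(x)                     # layer.py:497
        x = x.to(self.lora_A.weight.dtype)              # layer.py:506
        # use_dora['default_0'] is guarded False, so only the plain LoRA branch was compiled:
        return result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling  # layer.py:509

# With base = nn.Linear(3072, 9216) this reproduces the guarded adapter sizes
# [16, 3072] and [9216, 16].
```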
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
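The LayerNorm guards just above pin eps == 1e-06, normalized_shape == (3072,), and both affine parameters absent (ID_MATCHed, consistent with elementwise_affine=False), while the quoted diffusers lines (normalization.py:169/171, transformer_flux.py:88) show how temb drives the modulation. A hedged sketch of that pattern; the three-way chunk into shift/scale/gate is an assumption consistent with the 3072 -> 9216 linear guarded earlier:

```python
import torch
from torch import nn

dim = 3072
silu = nn.SiLU()
linear = nn.Linear(dim, 3 * dim)                              # guarded base layer: 3072 -> 9216
norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # weight/bias None, eps 1e-06

def ada_norm_single_sketch(x: torch.Tensor, temb: torch.Tensor):
    emb = linear(silu(temb))                                     # normalization.py:169
    shift_msa, scale_msa, gate_msa = emb.chunk(3, dim=1)         # assumed split of the 3*dim output
    x = norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # normalization.py:171
    return x, gate_msa                                           # transformer_flux.py:88

x, temb = torch.randn(1, 4096, dim), torch.randn(1, dim)
out, gate = ada_norm_single_sketch(x, temb)
print(out.shape, gate.shape)  # torch.Size([1, 4096, 3072]) torch.Size([1, 3072])
```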
L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
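For orientation: the guard sources quoted above (peft/tuners/lora/layer.py:497-509) all trace PEFT's LoRA Linear forward for to_q. Below is a minimal, simplified sketch of that control flow under the shapes and flags these guards assert (base layer 3072x3072, one active adapter 'default_0' of rank 16, scaling 1.0, DoRA disabled, adapters not merged). It is an illustration of the guarded code path, not the actual PEFT implementation; in particular the lora_dropout module is assumed to be an Identity (dropout p = 0.0), which the guards do not state explicitly.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    """Simplified stand-in for the guarded LoRA wrapper around to_q (not the real peft class)."""

    def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base_layer                      # TENSOR_MATCH: weight [3072, 3072], bias [3072]
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})                               # assumed p = 0.0
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})  # weight [16, 3072], bias None
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)}) # weight [3072, 16], bias None
        self.scaling = {adapter: 1.0}                     # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}                  # ID_MATCH against False
        self.merged_adapters = []                         # LENGTH_CHECK: empty, i.e. not merged
        self._active_adapter = [adapter]                  # LENGTH_CHECK == 1, [0] == 'default_0'
        self._disable_adapters = False                    # ID_MATCH against False

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = self.base_layer(x)                                      # layer.py:497
        for active_adapter in self._active_adapter:                      # layer.py:499
            if active_adapter not in self.lora_A.keys():                 # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                         # layer.py:503
            dropout = self.lora_dropout[active_adapter]                  # layer.py:504
            scaling = self.scaling[active_adapter]                       # layer.py:505
            x = x.to(lora_A.weight.dtype)                                # layer.py:506
            if not self.use_dora[active_adapter]:                        # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
        return result

# Example matching the guarded shapes (CPU/float32 here purely for illustration):
to_q = LoraLinearSketch(nn.Linear(3072, 3072), r=16)
out = to_q(torch.randn(2, 3072))   # -> [2, 3072]

Because the guard tree pins concrete values such as scaling['default_0'] == 1.0, the active adapter list, and every parameter's dtype, shape, and stride, changing any of these (e.g. a different lora_scale or adapter set) invalidates the guards for this frame and forces a recompile.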
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
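The to_k entries that follow repeat the same LoRA guard pattern already shown for to_q. For reference, the [__guards] artifact printed throughout this dump is normally enabled through PyTorch's logging controls; a small sketch follows (flag names assumed from current torch 2.x logging, so verify against your installed version).

import torch
import torch.nn as nn

# Enable the artifacts seen in this dump: the GUARDS tree plus recompile reasons.
# Equivalent environment variable: TORCH_LOGS="guards,recompiles"
torch._logging.set_logs(guards=True, recompiles=True)

model = nn.Linear(8, 8)                    # stand-in for the compiled transformer in this run
compiled = torch.compile(model)
_ = compiled(torch.randn(2, 8))            # first call compiles the frame and prints its GUARDS tree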
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
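[Editor's note: the guard entries around this point all re-derive the same PEFT LoRA dispatch logic, here for the to_k/to_v projections of single_transformer_blocks.20 and, further below, for the norm.linear of block 21. Every Python attribute Dynamo had to read while tracing peft/tuners/lora/layer.py's forward (disable_adapters, active_adapters, the lora_A/lora_B/lora_dropout ModuleDicts, scaling, use_dora, hook dicts, and the base_layer parameters) becomes a TYPE_MATCH / DICT_LENGTH / EQUALS_MATCH / ID_MATCH / TENSOR_MATCH guard. The sketch below is a minimal, illustrative reconstruction of that control flow, assembled only from the source lines quoted in the guard comments; MiniLoraLinear is a made-up name, not the actual PEFT class, and the real layer additionally handles DoRA, embeddings, merged adapters, etc.]

```python
# Minimal sketch (illustrative, NOT peft.tuners.lora.layer.Linear) of the forward
# path these guards specialize on. Inline comments cite the peft/tuners/lora/layer.py
# line numbers quoted in the guard log above.
import torch
import torch.nn as nn


class MiniLoraLinear(nn.Module):
    def __init__(self, base: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}        # guarded via EQUALS_MATCH == 1.0
        self.use_dora = {adapter: False}     # guarded via ID_MATCH on the False object
        self.disable_adapters = False        # guarded via ID_MATCH (layer.py:488)
        self._active_adapter = [adapter]     # guarded via LENGTH_CHECK / EQUALS_MATCH

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.disable_adapters:                              # layer.py:488
            return self.base_layer(x)
        result = self.base_layer(x)                            # layer.py:497
        for active_adapter in self._active_adapter:            # layer.py:499
            if active_adapter not in self.lora_A.keys():       # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]               # layer.py:503
            dropout = self.lora_dropout[active_adapter]        # layer.py:504
            scaling = self.scaling[active_adapter]             # layer.py:505
            x = x.to(lora_A.weight.dtype)                      # layer.py:506
            if not self.use_dora[active_adapter]:              # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result


if __name__ == "__main__":
    # The guarded tensors in the log are bfloat16 on CUDA with size [3072, 3072];
    # float32 on CPU is used here just to keep the sketch runnable anywhere.
    layer = MiniLoraLinear(nn.Linear(3072, 3072))
    print(layer(torch.randn(2, 3072)).shape)  # torch.Size([2, 3072])
```

Because this structure is re-checked for every LoRA-wrapped projection in every block, anything that changes it (for example adding a third adapter, which would break the DICT_LENGTH == 2 guards on scaling, lora_A, lora_B, lora_dropout and use_dora, or activating the currently inactive 'default_1' entries) invalidates the guard tree and forces a recompile; fusing/merging the LoRA weights into the base layers before compiling is one common way to avoid this whole class of guards.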
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].processor, 139846066093696) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['20']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['20']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['20']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=21 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[21] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[21] == '21' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
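[Editor's note] The non-tensor guards in this subtree (ID_MATCH on `_disable_adapters`, LENGTH_CHECK on `merged_adapters`, EQUALS_MATCH on `_active_adapter[0] == 'default_0'`, ID_MATCH on `use_dora['default_0']`) mirror the Python-level attribute reads in the PEFT source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509, peft/tuners/tuners_utils.py:455/469). A simplified sketch of that control flow is below; attribute names follow the quoted lines, but this is an illustration of why each guard exists, not the actual PEFT implementation.

```python
# Sketch of the attribute reads behind the ID_MATCH / LENGTH_CHECK / EQUALS_MATCH
# guards above. Simplified; names follow the source lines quoted in the log.
def lora_linear_forward_sketch(layer, x, *args, **kwargs):
    if layer.disable_adapters:                       # layer.py:488 -> ID_MATCH on _disable_adapters
        return layer.base_layer(x, *args, **kwargs)  # (merge/unmerge handling elided)

    result = layer.base_layer(x, *args, **kwargs)    # layer.py:497
    for active_adapter in layer.active_adapters:     # layer.py:499 / tuners_utils.py:469
        if active_adapter not in layer.lora_A.keys():    # layer.py:500
            continue
        lora_A = layer.lora_A[active_adapter]
        lora_B = layer.lora_B[active_adapter]            # layer.py:503
        dropout = layer.lora_dropout[active_adapter]     # layer.py:504
        scaling = layer.scaling[active_adapter]          # layer.py:505
        x = x.to(lora_A.weight.dtype)                    # layer.py:506
        if not layer.use_dora[active_adapter]:           # layer.py:508 -> ID_MATCH on the bool
            result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
    return result
```

Every attribute touched in this path becomes a guard on the compiled graph, which is why the tree repeats this block of checks for each LoRA-wrapped Linear in the model.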
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
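[Editor's note] The `norm.norm` guards just above pin down the LayerNorm configuration of this block's AdaLayerNormZeroSingle: `eps == 1e-06`, `normalized_shape == (3072,)`, and weight/bias guarded by identity against what is evidently None (the same object id used for the `bias=None` guards on the LoRA layers), i.e. a non-affine LayerNorm. Together with the lines quoted from diffusers/src/diffusers/models/normalization.py:169-171 and transformer_flux.py:88, the guarded computation is roughly the following sketch; the three-way chunk is an inference from the 9216-wide `norm.linear` output (3 x 3072) and is not the diffusers source verbatim.

```python
import torch
import torch.nn.functional as F

# Rough shape of the guarded AdaLayerNormZeroSingle path (normalization.py:169-171
# as quoted in the guards); a sketch under the assumptions stated above.
def ada_layer_norm_zero_single_sketch(x, emb, linear):
    emb = linear(F.silu(emb))                       # emb = self.linear(self.silu(emb))
    shift_msa, scale_msa, gate_msa = emb.chunk(3, dim=1)   # 9216 -> 3 x 3072 (inferred)
    # non-affine LayerNorm: weight=None, bias=None, eps=1e-6, normalized_shape=(3072,)
    x = F.layer_norm(x, (3072,), None, None, 1e-6)
    x = x * (1 + scale_msa[:, None]) + shift_msa[:, None]  # normalization.py:171
    return x, gate_msa                              # transformer_flux.py:88 unpacks (x, gate)
```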
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 
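[Editor's note] The TENSOR_MATCH guards on `proj_mlp` spell out the adapter geometry for this block: a frozen bf16 base Linear with weight [12288, 3072] and bias [12288], trainable lora_A [16, 3072] and lora_B [12288, 16], scaling 1.0 for 'default_0', and 'default_0' as the sole active adapter. A minimal runnable sketch of the guarded update with exactly those shapes (illustrative modules, not the PEFT classes; dropout omitted):

```python
import torch
import torch.nn as nn

# Shapes taken from the TENSOR_MATCH guards on single_transformer_blocks.21.proj_mlp:
# base 3072 -> 12288 (frozen), LoRA rank 16, bfloat16.
base = nn.Linear(3072, 12288, dtype=torch.bfloat16).requires_grad_(False)
lora_A = nn.Linear(3072, 16, bias=False, dtype=torch.bfloat16)   # weight [16, 3072]
lora_B = nn.Linear(16, 12288, bias=False, dtype=torch.bfloat16)  # weight [12288, 16]
scaling = 1.0                                                    # EQUALS_MATCH on scaling['default_0']

x = torch.randn(2, 3072, dtype=torch.bfloat16)
# guarded line: result = result + lora_B(lora_A(dropout(x))) * scaling
result = base(x) + lora_B(lora_A(x)) * scaling
print(result.shape)  # torch.Size([2, 12288])
```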
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'], 
accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
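The guards up to this point freeze the LoRA branch of `attn.to_v` in single transformer block 21: `lora_A['default_0'].weight` is a bfloat16 [16, 3072] parameter, `lora_B['default_0'].weight` is [3072, 16], the LoRA linears carry no bias (their `bias` entries are pinned by ID_MATCH), and the dropout module has no overridden `forward`. A minimal sketch of the update these guards correspond to, following the `result = result + lora_B(lora_A(dropout(x))) * scaling` line quoted from peft/tuners/lora/layer.py; the shapes and dtype come from the guard dump, while the batch size, the Identity dropout, and the scaling value are placeholders, not values taken from the run.

```python
import torch
import torch.nn as nn

# Shapes from the TENSOR_MATCH guards above: hidden size 3072, LoRA rank 16,
# bfloat16 weights, lora_A/lora_B without bias. Data and batch size are made up.
hidden, rank = 3072, 16

base_layer = nn.Linear(hidden, hidden, dtype=torch.bfloat16)        # stand-in for attn.to_v's base layer
lora_A = nn.Linear(hidden, rank, bias=False, dtype=torch.bfloat16)  # weight [16, 3072]
lora_B = nn.Linear(rank, hidden, bias=False, dtype=torch.bfloat16)  # weight [3072, 16]
dropout = nn.Identity()   # stand-in for lora_dropout['default_0']
scaling = 1.0             # stand-in for self.scaling[active_adapter]

x = torch.randn(2, 8, hidden, dtype=torch.bfloat16)
x = x.to(lora_A.weight.dtype)                             # layer.py:506
result = base_layer(x)                                    # layer.py:497
result = result + lora_B(lora_A(dropout(x))) * scaling    # layer.py:509
```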
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
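Besides tensor properties, Dynamo also guards plain Python state on the PEFT layer: `use_dora['default_0']`, `_active_adapter == ['default_0']`, `merged_adapters` being empty, and `_disable_adapters`. A small toy below (not the FLUX/PEFT code) illustrates why those guards are recorded: the branch traced in `forward` depends on this ordinary Python state, so mutating it after `torch.compile` invalidates the guards and forces a recompile.

```python
import torch

# Toy module: an ordinary dict attribute controls which branch forward takes,
# so Dynamo burns its value into the compiled graph's guards.
class Toy(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)
        self.use_dora = {"default_0": False}   # plain Python state, guarded like use_dora above

    def forward(self, x):
        if not self.use_dora["default_0"]:     # branch decided at trace time, kept honest by a guard
            return self.linear(x)
        return self.linear(x) * 2

m = Toy()
cm = torch.compile(m)
cm(torch.randn(2, 4))                # first call: compile and record guards
m.use_dora["default_0"] = True       # flip the guarded flag ...
cm(torch.randn(2, 4))                # ... the guard check fails and Dynamo recompiles for the other branch
```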
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].processor, 139846065365440) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['21']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
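The next group of guards covers the attention processor dispatch: the processor instance is both TYPE_MATCHed and ID_MATCHed, and the `None` default of `encoder_hidden_states` in `Attention.forward` is pinned via `__defaults__[0]`. Below is a hedged paraphrase of that dispatch pattern, reconstructed only from the source lines quoted in the dump (attention_processor.py:479, :490, :1713, :1716, :1718); the class names are illustrative stand-ins, not diffusers code.

```python
import inspect
import torch
import torch.nn as nn

# Stand-in processor: the forward of the attention module inspects its __call__
# signature (attention_processor.py:479) and delegates to it (:490).
class ToyProcessor:
    def __call__(self, attn, hidden_states, encoder_hidden_states=None):
        batch_size, _, _ = (
            hidden_states.shape if encoder_hidden_states is None
            else encoder_hidden_states.shape
        )                                    # attention_processor.py:1713
        query = attn.to_q(hidden_states)     # :1716
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)     # :1718
        return query, key, value, batch_size

class ToyAttention(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.to_q, self.to_k, self.to_v = (nn.Linear(dim, dim) for _ in range(3))
        self.processor = ToyProcessor()

    def forward(self, hidden_states, encoder_hidden_states=None):
        accepted = set(inspect.signature(self.processor.__call__).parameters.keys())
        assert "hidden_states" in accepted   # mirrors the signature check at :479
        return self.processor(self, hidden_states, encoder_hidden_states)

attn = ToyAttention(64)
q, k, v, bsz = attn(torch.randn(2, 8, 64))
```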
L['self']._modules['single_transformer_blocks']._modules['21']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['21']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['21']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=22 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[22] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[22] == '22' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 
'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 
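
Note: the guards above for ._modules['norm'] (its 'linear' and 'norm' children) follow diffusers' AdaLayerNormZeroSingle, per the source lines quoted in the guard comments (diffusers/src/diffusers/models/normalization.py:169 and :171, plus nn.LayerNorm's F.layer_norm call). Below is a minimal sketch of that path, assuming the guarded constants (normalized_shape=(3072,), eps=1e-6) and reading the ID_MATCH on the LayerNorm weight and bias as both being None (elementwise_affine=False); the chunk into shift/scale/gate is not quoted in the log and is reconstructed from the variable names on line 171.

    import torch
    import torch.nn as nn

    class AdaLayerNormZeroSingleSketch(nn.Module):
        # Sketch only: mirrors the lines quoted in the guard comments, not the full diffusers class.
        def __init__(self, dim=3072):
            super().__init__()
            self.silu = nn.SiLU()
            self.linear = nn.Linear(dim, 3 * dim)  # wrapped by a peft lora.Linear in the guarded model
            self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # weight/bias None per ID_MATCH

        def forward(self, x, emb):
            emb = self.linear(self.silu(emb))                 # normalization.py:169
            shift_msa, scale_msa, gate = emb.chunk(3, dim=1)  # assumed split; names follow line 171
            x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # normalization.py:171
            return x, gate  # consumed as "norm_hidden_states, gate = self.norm(...)" (transformer_flux.py:88)
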
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
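
Note: most guards in this block retrace peft's lora.Linear.forward; the comments quote peft/tuners/lora/layer.py:488-509 together with the active_adapters and merged properties (tuners_utils.py:469 and :455). Below is a paraphrased sketch of that control flow under the guarded state (_active_adapter == ['default_0'], merged_adapters empty, scaling['default_0'] == 1.0; the ID_MATCH values on _disable_adapters and use_dora are consistent with both being False, which is why the non-DoRA update at layer.py:509 is the branch that was traced). Line references follow the log's quotes, not any particular peft release.

    def lora_linear_forward_sketch(layer, x, *args, **kwargs):
        # Paraphrase of the branches the guards pin down (peft/tuners/lora/layer.py:488-509).
        if layer.disable_adapters:                          # layer.py:488, guarded to be falsy here
            return layer.base_layer(x, *args, **kwargs)     # (the real code also unmerges if layer.merged)

        result = layer.base_layer(x, *args, **kwargs)       # layer.py:497
        for active_adapter in layer.active_adapters:        # layer.py:499, ['default_0'] under these guards
            if active_adapter not in layer.lora_A.keys():   # layer.py:500
                continue
            lora_A = layer.lora_A[active_adapter]
            lora_B = layer.lora_B[active_adapter]           # layer.py:503
            dropout = layer.lora_dropout[active_adapter]    # layer.py:504
            scaling = layer.scaling[active_adapter]         # layer.py:505, equals 1.0 per EQUALS_MATCH
            x = x.to(lora_A.weight.dtype)                   # layer.py:506, torch.bfloat16 per TENSOR_MATCH
            if not layer.use_dora[active_adapter]:          # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

active_adapters itself just wraps a string active_adapter into a one-element list (the isinstance check at tuners_utils.py:469), and merged is bool(self.merged_adapters) (tuners_utils.py:455), which is what the TYPE_MATCH / LENGTH_CHECK guards on those attributes correspond to.
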
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
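
Note: the check_tensor guards above fix proj_mlp's geometry exactly: the frozen base Linear is 3072 -> 12288 in torch.bfloat16 on cuda:0 (weight [12288, 3072], bias [12288], requires_grad=False), while the 'default_0' adapter is a rank-16 pair, lora_A 3072 -> 16 and lora_B 16 -> 12288, trainable and bias-free, applied with scaling 1.0. A small, purely illustrative reconstruction of that stack follows (the names, the batch/sequence sizes, and treating lora_dropout as a no-op are assumptions):

    import torch
    import torch.nn as nn

    dim, inner, rank = 3072, 12288, 16                       # sizes from the TENSOR_MATCH guards
    dev = "cuda:0" if torch.cuda.is_available() else "cpu"   # the guards pin device=0
    dtype = torch.bfloat16

    base = nn.Linear(dim, inner, bias=True, device=dev, dtype=dtype)      # weight [12288, 3072], bias [12288]
    base.requires_grad_(False)                                            # requires_grad=False in the guard
    lora_A = nn.Linear(dim, rank, bias=False, device=dev, dtype=dtype)    # weight [16, 3072]
    lora_B = nn.Linear(rank, inner, bias=False, device=dev, dtype=dtype)  # weight [12288, 16]
    scaling = 1.0                                                         # scaling['default_0'] == 1.0

    x = torch.randn(1, 4096, dim, device=dev, dtype=dtype)   # sequence length is illustrative
    out = base(x) + lora_B(lora_A(x)) * scaling              # layer.py:509 with dropout as identity
    assert out.shape == (1, 4096, inner)
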
| | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].scaling['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._backward_hooks # hidden_states = gate * 
self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
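
Taken together, the TENSOR_MATCH guards above pin to_k to a 3072->3072 bfloat16 Linear on cuda:0 (base weight and bias with requires_grad=False) carrying a rank-16 LoRA update: lora_A['default_0'].weight is [16, 3072] and lora_B['default_0'].weight is [3072, 16], and the EQUALS_MATCH on scaling['default_0'] == 1.0 is consistent with lora_alpha == r. A minimal, self-contained sketch of tensors with exactly these shapes (variable names and the alpha/rank relationship are illustrative assumptions, and the layers are created on CPU here rather than cuda:0):

    import torch
    import torch.nn as nn

    features = 3072   # width of the guarded Flux attention projection (base_layer weight guard)
    rank = 16         # LoRA rank implied by the lora_A / lora_B weight guards
    lora_alpha = 16   # assumed; scaling = lora_alpha / rank == 1.0 would match the EQUALS_MATCH above

    base_layer = nn.Linear(features, features, bias=True, dtype=torch.bfloat16)
    base_layer.requires_grad_(False)   # the base weight/bias guards record requires_grad=False
    lora_A = nn.Linear(features, rank, bias=False, dtype=torch.bfloat16)
    lora_B = nn.Linear(rank, features, bias=False, dtype=torch.bfloat16)

    assert tuple(base_layer.weight.shape) == (3072, 3072)
    assert tuple(lora_A.weight.shape) == (16, 3072)
    assert tuple(lora_B.weight.shape) == (3072, 16)
    assert lora_alpha / rank == 1.0
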
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 
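
The ID_MATCH guards in this subtree (___check_obj_id against constants such as 7580768 for the LoRA bias entries and 7629920 for use_dora['default_0'] and _disable_adapters) are object-identity checks; the numbers are process-specific id() values, and given the guarded expressions (the bias of a bias-free LoRA Linear, "if not self.use_dora[...]", "if self.disable_adapters") they plausibly correspond to the singletons None and False, though the log itself only records the raw ids. A tiny illustration of what such a check reduces to:

    # ___check_obj_id(obj, expected_id) behaves like: id(obj) == expected_id.
    # For singletons such as None and False the id is stable for the life of the
    # process, so "is None" / "is False" can be guarded by a single integer compare.
    bias = None
    use_dora = False
    assert id(bias) == id(None)
    assert use_dora is False and id(use_dora) == id(False)
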
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].processor, 139846065366208) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['22']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['22']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['22']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=23 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[23] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[23] == '23' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
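The guard kinds that recur throughout this dump (TYPE_MATCH, ID_MATCH, EQUALS_MATCH, DICT_LENGTH/LENGTH_CHECK, TENSOR_MATCH, NO_TENSOR_ALIASING) can be read as simple predicates over the traced module tree. Below is a rough, illustrative paraphrase for reading the log; the real checks live in torch/_dynamo/guards.py and its C++ guard managers, so this is an approximation, not the actual implementation.

# Illustrative paraphrase of the guard kinds appearing in this dump.
import torch

def type_match(obj, expected_type):           # TYPE_MATCH / ___check_type_id
    return type(obj) is expected_type         # the dump stores id(type(obj)), e.g. 97167728

def id_match(obj, expected_obj):              # ID_MATCH / ___check_obj_id
    # the guards on the LoRA biases and on use_dora/_disable_adapters are consistent
    # with pinning the None and False singletons by object identity
    return obj is expected_obj

def equals_match(value, expected):            # EQUALS_MATCH
    return value == expected                  # e.g. _active_adapter[0] == 'default_0'

def dict_length(d, n=None):                   # DICT_LENGTH / LENGTH_CHECK
    return (len(d) == n) if n is not None else (not d)

def tensor_match(t, *, dtype, device, requires_grad, size, stride):   # TENSOR_MATCH
    return (isinstance(t, torch.nn.Parameter)
            and t.dtype == dtype
            and t.device == device
            and t.requires_grad == requires_grad
            and tuple(t.size()) == tuple(size)
            and tuple(t.stride()) == tuple(stride))

# Checked against the rank-16 lora_A weight guarded above (device=0 in the dump
# means cuda:0; CPU is used here so the sketch runs anywhere):
w = torch.nn.Parameter(torch.empty(16, 3072, dtype=torch.bfloat16))
print(tensor_match(w, dtype=torch.bfloat16, device=w.device,
                   requires_grad=True, size=(16, 3072), stride=(3072, 1)))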
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 
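The proj_mlp guards above mirror the PEFT LoRA forward path cited in their comments (peft/tuners/lora/layer.py:488-509). The sketch below is a condensed, illustrative version of that path under simplifying assumptions: a single adapter, the DoRA branch and dtype bookkeeping omitted, nn.Identity standing in for the guarded lora_dropout module, and class/attribute names kept only loosely faithful to PEFT.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    def __init__(self, base: nn.Linear, r: int = 16, scaling: float = 1.0,
                 adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: scaling}
        self.use_dora = {adapter: False}
        self._active_adapter = [adapter]

    def forward(self, x):
        result = self.base_layer(x)                          # layer.py:497
        for active_adapter in self._active_adapter:          # layer.py:499
            if active_adapter not in self.lora_A.keys():     # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]             # layer.py:503
            dropout = self.lora_dropout[active_adapter]      # layer.py:504
            scaling = self.scaling[active_adapter]           # layer.py:505
            x = x.to(lora_A.weight.dtype)                    # layer.py:506
            if not self.use_dora[active_adapter]:            # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
        return result

# proj_mlp in the guards: base Linear(3072 -> 12288) with a rank-16 adapter.
layer = LoraLinearSketch(nn.Linear(3072, 12288), r=16)
print(layer(torch.randn(2, 3072)).shape)   # torch.Size([2, 12288])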
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 
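Guard and recompile dumps of this form are normally enabled through the TORCH_LOGS artifacts (e.g. TORCH_LOGS="guards,recompiles"); on recent PyTorch 2.x builds the programmatic equivalent is torch._logging.set_logs, though exact artifact names can vary by version. A minimal sketch, with f standing in for the compiled transformer forward:

import torch
import torch._logging

# Print the guard tree after compilation and log any recompilations.
torch._logging.set_logs(guards=True, recompiles=True)

@torch.compile
def f(x):
    # mirrors the act_mlp guard below: GELU with approximate='tanh'
    return torch.nn.functional.gelu(x, approximate="tanh")

f(torch.randn(8, 3072))   # first call compiles and emits the guard tree
f(torch.randn(8, 3072))   # guards pass, no recompile
f(torch.randn(8, 4096))   # a size guard on the input fails -> typically logged as a recompile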
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling 
= self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
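The tensor sizes printed in the proj_out guards follow directly from nn.Linear's (out_features, in_features) weight layout and the rank-16 adapters: base [3072, 15360], lora_A [16, 15360], lora_B [3072, 16]. The 15360 input width matches the attention output (3072) concatenated with the 4x MLP hidden states (12288) ahead of proj_out in the Flux single block. A short shape check:

import torch.nn as nn

dim, mlp_ratio, rank = 3072, 4, 16
proj_mlp = nn.Linear(dim, dim * mlp_ratio)                       # weight: (12288, 3072)
proj_out = nn.Linear(dim + dim * mlp_ratio, dim)                 # weight: (3072, 15360)
lora_A   = nn.Linear(proj_out.in_features, rank, bias=False)     # weight: (16, 15360)
lora_B   = nn.Linear(rank, proj_out.out_features, bias=False)    # weight: (3072, 16)
for name, m in [("proj_mlp", proj_mlp), ("proj_out", proj_out),
                ("lora_A", lora_A), ("lora_B", lora_B)]:
    print(name, tuple(m.weight.shape))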
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].processor, 139846065366976) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['23']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['23']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['23']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=24 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[24] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[24] == '24' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
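[editor's note] The records just above guard lora_A['default_0'] of the same layer: a TENSOR_MATCH on its weight (bfloat16, cuda:0, requires_grad=True, size [16, 3072], stride [3072, 1]) and an ID_MATCH on its bias, which resolves to the None singleton (bias=False). The snippet below is a hypothetical stand-alone reconstruction of the attributes that guard pins, assuming a CUDA device is available; it is not taken from the compiled program itself.

import torch
import torch.nn as nn

# Rank-16 down-projection from the 3072-wide hidden state, as described by the guard.
lora_A_default_0 = nn.Linear(3072, 16, bias=False,
                             dtype=torch.bfloat16, device="cuda:0")

w = lora_A_default_0.weight
assert w.shape == (16, 3072) and w.stride() == (3072, 1)
assert w.dtype == torch.bfloat16 and w.device.index == 0 and w.requires_grad
assert lora_A_default_0.bias is None   # matches the ID_MATCH on the bias entry

# Any mismatch in these attributes (different rank, dtype, device, or a newly
# attached bias) fails TENSOR_MATCH and forces a recompile; guard/recompile
# logging like the dump above can be enabled in recent PyTorch builds with
# torch._logging.set_logs(guards=True, recompiles=True)  # or TORCH_LOGS="+guards,recompiles"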
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].scaling['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'], 
accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # 
peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in 
__call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor 
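[editor's note] All of the guards in this stretch hang off the same PEFT LoRA forward path that the trailing comments quote (peft/tuners/lora/layer.py:497-509): the frozen base projection, the per-adapter dropout, lora_A, lora_B, and the scaling lookup. As a hedged orientation aid, here is a minimal sketch of that path with shapes and trainability taken from the TENSOR_MATCH entries above; the class name LoraLinearSketch, the single-adapter layout, and the Identity dropout are illustrative assumptions, not PEFT code.

```python
# Illustrative sketch (not the PEFT source) of the guarded LoRA linear path.
# Shapes mirror the TENSOR_MATCH guards: base weight [3072, 3072] (frozen),
# lora_A weight [16, 3072] and lora_B weight [3072, 16] (trainable, no bias),
# and scaling['default_0'] == 1.0 from the EQUALS_MATCH guard.
import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    def __init__(self, in_features=3072, out_features=3072, r=16, scaling=1.0):
        super().__init__()
        self.base_layer = nn.Linear(in_features, out_features)  # requires_grad=False in the log
        self.base_layer.weight.requires_grad_(False)
        self.base_layer.bias.requires_grad_(False)
        self.lora_A = nn.Linear(in_features, r, bias=False)     # requires_grad=True in the log
        self.lora_B = nn.Linear(r, out_features, bias=False)    # bias guarded as None (ID_MATCH)
        self.lora_dropout = nn.Identity()                       # stands in for the guarded dropout module
        self.scaling = scaling

    def forward(self, x):
        result = self.base_layer(x)                              # layer.py:497
        x = x.to(self.lora_A.weight.dtype)                       # layer.py:506
        return result + self.lora_B(self.lora_A(self.lora_dropout(x))) * self.scaling  # layer.py:509
```

Every attribute that sketch touches (parameter shapes and dtypes, the adapter key 'default_0', the scaling value, the dropout module type) shows up as a guard in the dump, which is why the tree repeats near-identically for to_q, to_k, and to_v. [end editor's note]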
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], 
stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].processor, 139846065367744) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['24']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['24']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['24']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=25 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[25] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[25] == '25' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
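
The next group guards the adapter bookkeeping on the same wrapped linear rather than its tensors: use_dora['default_0'] is id-matched for the "if not self.use_dora[active_adapter]" branch, _active_adapter is a one-element list whose first item equals 'default_0', merged_adapters is checked to be empty (so merged is False), and _disable_adapters is id-matched for the "if self.disable_adapters" check. A hedged simplification of the accessors those comments point at (peft/tuners/tuners_utils.py:455 and :469), with the surrounding PEFT class omitted:

    # Hedged sketch of the adapter-state accessors referenced by the guards above
    # (peft/tuners/tuners_utils.py, heavily simplified).
    class AdapterStateSketch:
        def __init__(self):
            self.active_adapter = ["default_0"]  # guarded: list of length 1, [0] == 'default_0'
            self.merged_adapters = []            # guarded empty, so merged stays False
            self.disable_adapters = False        # the traced branch implies the adapter path is enabled

        @property
        def active_adapters(self):
            if isinstance(self.active_adapter, str):   # tuners_utils.py:469
                return [self.active_adapter]
            return self.active_adapter

        @property
        def merged(self):
            return bool(self.merged_adapters)          # tuners_utils.py:455

Because _active_adapter[0] == 'default_0' is an EQUALS_MATCH, switching adapters at runtime (for example through a set_adapter-style call) breaks this guard set and forces another recompile of the frame.
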
[__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 
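
The TENSOR_MATCH entries are the most specific guards in this tree: check_tensor pins the Python class (Parameter), the dispatch key set, the dtype, the device index, requires_grad, and the exact size and stride. Taking the proj_mlp base-layer weight guarded just above as the example, here is a hedged, self-contained illustration of what has to stay stable between calls; the parameter below is a stand-in built from the values printed in the guard, not the real model weight.

    import torch

    # Stand-in parameter with the properties check_tensor pinned for
    # proj_mlp.base_layer.weight (bfloat16, cuda:0, frozen, [12288, 3072]).
    w = torch.nn.Parameter(
        torch.empty(12288, 3072, dtype=torch.bfloat16, device="cuda:0"),
        requires_grad=False,
    )
    assert w.dtype == torch.bfloat16             # dtype is checked
    assert w.device == torch.device("cuda", 0)   # device index is checked
    assert w.requires_grad is False              # requires_grad is checked
    assert tuple(w.shape) == (12288, 3072)       # exact size ...
    assert tuple(w.stride()) == (3072, 1)        # ... and stride are checked
    # A dtype cast (model.half()), CPU offload, or requires_grad_(True) on the real
    # parameter would change one of these and fail the guard, triggering a recompile.
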
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
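
Zooming out, the guards for block 25 follow the data flow quoted in their own source comments (diffusers/src/diffusers/models/normalization.py:169-171 and diffusers/src/diffusers/models/transformers/transformer_flux.py:88-98): the adaptive norm turns temb into shift/scale/gate and modulates a LayerNorm with eps 1e-06 over 3072 features and no affine parameters, proj_mlp (3072 -> 12288) feeds the tanh-approximated GELU guarded just above, and proj_out maps back to 3072 under the gate. A hedged sketch of that path with the LoRA wrappers dropped; the attention output is taken as a plain input because its guards fall outside this excerpt, and the three-way chunk and the concatenation are inferred from the guarded shapes rather than quoted.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    # Hedged sketch of the per-block data flow the guards walk through; dims follow
    # the guarded shapes, LoRA wrappers and the attention sub-block are omitted.
    class SingleBlockSketch(nn.Module):
        def __init__(self, dim=3072, mlp_ratio=4):
            super().__init__()
            self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # eps/shape/no-affine guards
            self.linear = nn.Linear(dim, 3 * dim)                  # 3072 -> 9216 modulation projection
            self.proj_mlp = nn.Linear(dim, mlp_ratio * dim)        # 3072 -> 12288
            self.act_mlp = nn.GELU(approximate="tanh")             # guarded: approximate == 'tanh'
            self.proj_out = nn.Linear(dim + mlp_ratio * dim, dim)  # 15360 -> 3072, see proj_out guards below

        def forward(self, hidden_states, temb, attn_output):
            emb = self.linear(F.silu(temb))                                        # normalization.py:169
            shift, scale, gate = emb.chunk(3, dim=1)                               # 3-way split assumed from 3*dim
            x = self.norm(hidden_states) * (1 + scale[:, None]) + shift[:, None]   # normalization.py:171
            mlp = self.act_mlp(self.proj_mlp(x))                                   # transformer_flux.py:89
            out = self.proj_out(torch.cat([attn_output, mlp], dim=2))              # concat assumed from 15360 width
            return hidden_states + gate.unsqueeze(1) * out                         # transformer_flux.py:98

The 15360 -> 3072 proj_out base layer guarded below is consistent with concatenating the 3072-wide attention path and the 12288-wide MLP path before the final projection.
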
[0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].processor, 139846065368512) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['25']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['25']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['25']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=26 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[26] V0828 05:05:29.845303 1882310 
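Most of the guards for single_transformer_blocks['25'].attn.to_v above walk the attribute accesses made by the PEFT LoRA Linear.forward path that the source comments keep quoting (peft/tuners/lora/layer.py:488-509). As a reading aid, here is a simplified sketch of that control flow, with merged-adapter and DoRA branches omitted; it is not the actual peft source.

```python
import torch

def lora_linear_forward(layer, x: torch.Tensor) -> torch.Tensor:
    """Simplified control flow of the LoRA Linear forward path quoted in the
    guard comments above (peft/tuners/lora/layer.py); sketch only."""
    if layer.disable_adapters:                          # layer.py:488
        return layer.base_layer(x)
    result = layer.base_layer(x)                        # layer.py:497
    for active_adapter in layer.active_adapters:        # layer.py:499
        if active_adapter not in layer.lora_A.keys():   # layer.py:500
            continue
        lora_A = layer.lora_A[active_adapter]
        lora_B = layer.lora_B[active_adapter]           # layer.py:503
        dropout = layer.lora_dropout[active_adapter]    # layer.py:504
        scaling = layer.scaling[active_adapter]         # layer.py:505
        x = x.to(lora_A.weight.dtype)                   # layer.py:506
        if not layer.use_dora[active_adapter]:          # layer.py:508
            result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
    return result
```

Every dictionary lookup in that loop (lora_A, lora_B, lora_dropout, scaling, use_dora, active_adapters) shows up above as its own GuardManager subtree, which is why the guard set per LoRA-wrapped Linear is so large.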
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[26] == '26' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
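The TENSOR_MATCH entries above pin down every static property of the guarded parameter, not just its identity. A rough Python-level reading of the guard on the norm.linear base_layer weight is sketched below; this is illustrative only, since the real check_tensor guard runs natively and additionally validates the DispatchKeySet and, via NO_TENSOR_ALIASING, that the guarded tensors do not alias one another.

```python
import torch

def tensor_match_like_guard(t: torch.Tensor) -> bool:
    # Python-level approximation of the TENSOR_MATCH shown above for
    # norm.linear.base_layer.weight: class, dtype, device, requires_grad,
    # shape and stride must all still match, otherwise Dynamo recompiles.
    return (
        isinstance(t, torch.nn.Parameter)
        and t.dtype == torch.bfloat16
        and t.device == torch.device("cuda", 0)
        and t.requires_grad is False
        and tuple(t.shape) == (9216, 3072)
        and t.stride() == (3072, 1)
    )
```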
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
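The norm-related guards above (silu, then a 9216x3072 linear, then a LayerNorm with normalized_shape=(3072,), eps=1e-6 and no affine weight/bias) trace the AdaLayerNorm-style modulation quoted from diffusers normalization.py:169/171. A minimal sketch of a module with that shape follows, assuming the three-way shift/scale/gate chunk implied by the 9216-wide (3 x 3072) projection; this is illustrative and not the diffusers implementation.

```python
import torch
import torch.nn as nn

class AdaLayerNormSingleSketch(nn.Module):
    """Illustrative module matching the guarded shapes: SiLU -> Linear(dim, 3*dim)
    -> affine-free LayerNorm, with the modulation quoted in the log."""

    def __init__(self, dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(dim, 3 * dim)   # 9216 outputs, as guarded above
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)

    def forward(self, x: torch.Tensor, emb: torch.Tensor):
        emb = self.linear(self.silu(emb))                      # normalization.py:169
        shift_msa, scale_msa, gate_msa = emb.chunk(3, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # :171
        return x, gate_msa
```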
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
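The TENSOR_MATCH guards just above pin each parameter's class, dispatch keys, dtype, device index, requires_grad flag, size, and stride, so the compiled graph is specialized to bfloat16 weights on device 0 and to rank-16 LoRA factors ([16, 3072] for lora_A against the [12288, 3072] base weight). A minimal, self-contained illustration of what happens when such a guard stops holding; the module and shapes here are invented for the example, not taken from the log:

    import torch
    import torch.nn as nn

    lora_A = nn.Linear(3072, 16, bias=False)   # hypothetical rank-16 stand-in

    @torch.compile
    def down_project(x):
        return lora_A(x)

    down_project(torch.randn(2, 3072))   # first call compiles; guards record the weight's size/stride/dtype

    # Swapping in a weight with a different rank breaks the TENSOR_MATCH on size ...
    lora_A.weight = nn.Parameter(torch.randn(32, 3072))
    down_project(torch.randn(2, 3072))   # ... so this call recompiles under a new guard set

In the context of this dump, loading an adapter with a different rank, dtype, or device would likewise miss this cache entry.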
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'], 
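Taken together, the proj_mlp guards above specialize the peft LoRA Linear forward along one concrete path: adapters enabled, nothing merged, a single active adapter 'default_0' with use_dora False and scaling 1.0. The sketch below reconstructs that control flow from the source lines quoted in the guard comments (peft/tuners/lora/layer.py:488-509); it is a simplified paraphrase, not the verbatim peft implementation.

    # Simplified reconstruction of the guarded path; 'self' is the LoRA-wrapped Linear.
    def lora_linear_forward(self, x, *args, **kwargs):
        # Guarded: _disable_adapters is False and merged_adapters is empty, so the
        # trace takes the "frozen base layer + unmerged adapter delta" branch.
        result = self.base_layer(x, *args, **kwargs)               # layer.py:497
        for active_adapter in self.active_adapters:                # layer.py:499; guarded == ['default_0']
            if active_adapter not in self.lora_A.keys():           # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                   # layer.py:503
            dropout = self.lora_dropout[active_adapter]            # layer.py:504
            scaling = self.scaling[active_adapter]                 # layer.py:505; guarded == 1.0
            x = x.to(lora_A.weight.dtype)                          # layer.py:506
            if not self.use_dora[active_adapter]:                  # layer.py:508; guarded: False
                result = result + lora_B(lora_A(dropout(x))) * scaling   # layer.py:509
        return result

Every branch condition on this path shows up as its own guard, which is why the dump enumerates _disable_adapters, merged_adapters, _active_adapter, scaling, and use_dora for each wrapped projection.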
accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # 
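The adapter bookkeeping pinned here for proj_out (as for proj_mlp earlier) is all plain Python state: _active_adapter == ['default_0'], merged_adapters empty, _disable_adapters False, use_dora['default_0'] False. Changing any of it at runtime, for example activating the second loaded adapter 'default_1' or merging the LoRA weights into the base layer, falls outside this specialization and triggers a recompile. A small generic illustration of that mechanism; the class below is invented for the example and is unrelated to peft:

    import torch

    class Gate(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.active = ["default_0"]          # stands in for _active_adapter
            self.lin = torch.nn.Linear(8, 8)

        def forward(self, x):
            if self.active[0] == "default_0":    # plain Python branch, resolved at trace time
                return self.lin(x)
            return x

    m = Gate()
    cm = torch.compile(m)
    cm(torch.randn(2, 8))        # specializes on active == ["default_0"] (an EQUALS_MATCH-style guard)
    m.active = ["default_1"]     # flips the guarded value ...
    cm(torch.randn(2, 8))        # ... so this call recompiles and traces the other branch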
peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].processor, 139846065164544) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['26']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['26']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['26']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['26']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=27 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[27] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[27] == '27' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 
'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].scaling['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._backward_hooks # hidden_states = gate * 
self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].processor, 139846065165312) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['27']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['27']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['27']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=28 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[28] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[28] == '28' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) 
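
For reference, the guards above on ._modules['norm']._modules['linear'] trace PEFT's LoRA Linear forward step by step; each guard's trailing comment quotes the source line it protects (peft/tuners/lora/layer.py:497-509 in forward). Below is a minimal sketch of that path, with shapes taken from the TENSOR_MATCH guards (frozen bfloat16 base layer 3072 -> 9216, rank-16 lora_A/lora_B with no bias). The class name, the Identity dropout, and the 1.0 scaling are illustrative assumptions, not values confirmed by this part of the log.

    import torch
    import torch.nn as nn

    class LoraLinearSketch(nn.Module):
        # Shapes mirror the TENSOR_MATCH guards for the norm.linear layer above.
        def __init__(self, in_features=3072, out_features=9216, r=16):
            super().__init__()
            self.base_layer = nn.Linear(in_features, out_features, dtype=torch.bfloat16)
            self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})  # assumed Identity
            self.lora_A = nn.ModuleDict(
                {"default_0": nn.Linear(in_features, r, bias=False, dtype=torch.bfloat16)})
            self.lora_B = nn.ModuleDict(
                {"default_0": nn.Linear(r, out_features, bias=False, dtype=torch.bfloat16)})
            self.scaling = {"default_0": 1.0}   # assumed; guarded as 1.0 on proj_mlp/proj_out
            self.use_dora = {"default_0": False}
            self.active_adapters = ["default_0"]

        def forward(self, x):
            result = self.base_layer(x)                          # layer.py:497
            for active_adapter in self.active_adapters:          # layer.py:499
                if active_adapter not in self.lora_A.keys():     # layer.py:500
                    continue
                lora_A = self.lora_A[active_adapter]
                lora_B = self.lora_B[active_adapter]             # layer.py:503
                dropout = self.lora_dropout[active_adapter]      # layer.py:504
                scaling = self.scaling[active_adapter]           # layer.py:505
                x = x.to(lora_A.weight.dtype)                    # layer.py:506
                if not self.use_dora[active_adapter]:            # layer.py:508
                    result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
            return result

    emb = torch.randn(1, 3072, dtype=torch.bfloat16)
    out = LoraLinearSketch()(emb)   # -> [1, 9216], chunked downstream into shift/scale/gate

Note that the EQUALS_MATCH guards on _active_adapter[0] and on the scaling entries specialize the compiled graph to this exact adapter configuration, so switching the active adapter or its scale invalidates them and forces another compilation; the [0/2] tag on these entries already marks a recompilation of frame 0.
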
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling 
= self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
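The subtree above for `to_q` and the start of `to_k` is Dynamo specializing on PEFT's LoRA linear forward: TYPE_MATCH guards pin the `lora_A`/`lora_B` ModuleDicts and their `default_0` entries, TENSOR_MATCH guards fix the rank-16 adapter weights ([16, 3072] and [3072, 16], bfloat16, requires_grad=True) and the frozen [3072, 3072] base weight, and the ID_MATCH guards on the adapter biases are consistent with `bias=False` on those layers (the attribute is pinned to `None`). The following is a rough sketch only, under the assumption that the guarded module behaves like a plain two-factor LoRA wrapper; it is not PEFT's implementation, just the shape of the code path the inline comments cite (`result = result + lora_B(lora_A(dropout(x))) * scaling`):

```python
import torch
import torch.nn as nn


class LoraLinearSketch(nn.Module):
    """Toy stand-in for the guarded LoRA linear path; not PEFT's code."""

    def __init__(self, in_features=3072, out_features=3072, r=16, scaling=1.0):
        super().__init__()
        # Frozen base projection: matches the TENSOR_MATCH guards on the
        # base_layer weight [3072, 3072] / bias [3072] with requires_grad=False.
        self.base_layer = nn.Linear(in_features, out_features, bias=True)
        for p in self.base_layer.parameters():
            p.requires_grad_(False)
        # Per-adapter low-rank factors: lora_A weight [r, in], lora_B weight [out, r],
        # both bias=False, which is why the bias guards above reduce to ID_MATCH
        # checks (the attribute is pinned to None).
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
        self.scaling = {"default_0": scaling}   # EQUALS_MATCH: scaling['default_0'] == 1.0
        self.active_adapters = ["default_0"]    # EQUALS_MATCH: _active_adapter[0] == 'default_0'

    def forward(self, x):
        result = self.base_layer(x)
        for name in self.active_adapters:
            lora_A = self.lora_A[name]
            lora_B = self.lora_B[name]
            dropout = self.lora_dropout[name]
            x_cast = x.to(lora_A.weight.dtype)
            # The line the guards keep citing from peft/tuners/lora/layer.py:
            result = result + lora_B(lora_A(dropout(x_cast))) * self.scaling[name]
        return result


if __name__ == "__main__":
    layer = LoraLinearSketch()
    out = layer(torch.randn(2, 3072))
    print(out.shape)  # torch.Size([2, 3072])
```

Every attribute read on that path (adapter keys, `scaling`, `use_dora`, the hook dicts) becomes one of the guards above, which is why the same subtree repeats for `to_k` and `to_v` below.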
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].processor, 139846065166080) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['28']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['28']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['28']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=29 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[29] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[29] == '29' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
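[editor note] Most of the guards above descend into PEFT LoRA-wrapped Linear layers (attn.to_v of single block '28', then norm.linear of block '29') and specialize on their adapter bookkeeping: the 'default_0'/'default_1' keys of lora_A/lora_B/lora_dropout, scaling == 1.0, use_dora == False, a single-element _active_adapter list, empty merged_adapters, and the dtype/shape/stride metadata of each weight. For orientation only, here is a minimal sketch of the forward path those guards pin down, reconstructed from the source comments quoted in the log (peft/tuners/lora/layer.py:488-509); it is not the verbatim PEFT implementation, and the class name, the Identity dropout, and the simplified disable/merge check are illustrative assumptions.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Illustrative only: mirrors the attributes the guards above read.
    def __init__(self, base_layer: nn.Linear, r: int = 16):
        super().__init__()
        self.base_layer = base_layer                               # frozen bf16 weight in the log
        self.lora_A = nn.ModuleDict({"default_0": nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({"default_0": nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({"default_0": nn.Identity()})
        self.scaling = {"default_0": 1.0}                          # guarded: EQUALS_MATCH == 1.0
        self.use_dora = {"default_0": False}                       # guarded: ID_MATCH on False
        self._active_adapter = ["default_0"]                       # guarded: single-element list
        self.merged_adapters = []                                  # guarded: empty (not merged)
        self._disable_adapters = False                             # guarded: ID_MATCH on False

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Simplified control flow; the real code handles merge/unmerge separately.
        if self._disable_adapters or self.merged_adapters:         # layer.py:488 / tuners_utils.py:455
            return self.base_layer(x)
        result = self.base_layer(x)                                # layer.py:497
        for active_adapter in self._active_adapter:                # layer.py:499
            if active_adapter not in self.lora_A.keys():           # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]                   # layer.py:503
            dropout = self.lora_dropout[active_adapter]            # layer.py:504
            scaling = self.scaling[active_adapter]                 # layer.py:505
            x = x.to(lora_A.weight.dtype)                          # layer.py:506
            if not self.use_dora[active_adapter]:                  # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Because every one of these attributes is guarded, a structural change at runtime (for example activating the 'default_1' adapter that the keys above show is present, toggling use_dora, or merging adapters) fails the guard set and forces torch.compile to recompile this frame; the TENSOR_MATCH guards check dtype/device/shape/stride metadata, not weight values.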
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
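[editor note] The surrounding guards for block '29' also cover its modulation norm and the LoRA-wrapped linear feeding it (diffusers/src/diffusers/models/normalization.py:169-171, transformer_flux.py:88): the LayerNorm is pinned to eps == 1e-06, normalized_shape == (3072,), and None affine weight/bias, while the guarded base linear projects 3072 -> 9216. A minimal sketch of that path follows; the 3-way chunk into shift/scale/gate is an assumption consistent with the 9216 = 3 * 3072 projection and the quoted source lines, not verbatim diffusers code, and the class name is illustrative.

import torch
import torch.nn as nn

class AdaNormSingleSketch(nn.Module):
    # Illustrative only: the modulation path referenced by the guards in this region.
    def __init__(self, dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()                                      # guarded: inplace is False
        self.linear = nn.Linear(dim, 3 * dim)                      # guarded weight: [9216, 3072], bias: [9216]
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # guarded: eps, weight/bias are None

    def forward(self, x: torch.Tensor, emb: torch.Tensor):
        emb = self.linear(self.silu(emb))                          # normalization.py:169
        # Assumed split; sizes match the guarded 3 * 3072 output of self.linear.
        shift_msa, scale_msa, gate = emb.chunk(3, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # normalization.py:171
        return x, gate                                             # transformer_flux.py:88: norm_hidden_states, gate

Only scalar and structural properties are guarded here (eps, normalized_shape, the absence of affine parameters, tensor metadata), so updating weight values alone does not invalidate the compiled graph, whereas a change in hidden size or dtype would.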
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].scaling['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'], 
accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # 
peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in 
__call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], 
stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].processor, 139846065166848) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['29']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['29']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['29']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=30 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[30] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[30] == '30' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) 
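The guard block above enumerates every attribute of the PEFT LoRA wrapper around proj_mlp that Dynamo specializes on, and the same pattern repeats below for proj_out and the attention projections: the two registered adapters ('default_0', 'default_1'), the single active adapter 'default_0', the scaling value (1.0), the use_dora and _disable_adapters flags, the empty merged_adapters list, and the shapes/dtypes of the frozen bf16 base_layer weights and the rank-16 lora_A/lora_B weights. For orientation, the following is a minimal Python sketch of the forward path those guards correspond to. It is simplified from the peft/tuners/lora/layer.py lines cited in the log entries, not the verbatim PEFT implementation; the LoraLinearSketch name and the r/adapter arguments are assumptions made for the example.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Illustrative sketch only; the real logic lives in peft.tuners.lora.layer.Linear,
    # at the layer.py lines cited in the guards above.
    def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base_layer  # frozen bf16 Linear, e.g. 3072 -> 12288 for proj_mlp
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
        # PEFT uses nn.Dropout here when lora_dropout > 0, otherwise an identity module
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}      # guarded: EQUALS_MATCH scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}   # guarded: ID_MATCH on use_dora['default_0']
        self.active_adapters = [adapter]   # guarded: _active_adapter[0] == 'default_0'
        self.disable_adapters = False      # guarded: ID_MATCH on _disable_adapters
        self.merged_adapters = []          # guarded: LENGTH_CHECK (empty -> not merged)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # disable_adapters / merged branches elided: the guards show both are off here
        result = self.base_layer(x)                           # layer.py:497
        for active_adapter in self.active_adapters:           # layer.py:499
            if active_adapter not in self.lora_A.keys():      # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]              # layer.py:503
            dropout = self.lora_dropout[active_adapter]       # layer.py:504
            scaling = self.scaling[active_adapter]            # layer.py:505
            x = x.to(lora_A.weight.dtype)                     # layer.py:506
            if not self.use_dora[active_adapter]:             # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

Every attribute touched on this path is guarded above, so changing the active adapter, its scaling, or merging/unmerging the LoRA weights invalidates these guards and forces TorchDynamo to recompile the frame.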
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].processor, 139846065167616) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['30']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['30']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['30']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=31 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[31] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[31] == '31' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
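The TENSOR_MATCH entries above pin everything check_tensor lists for base_layer.weight and base_layer.bias: parameter class, dispatch keys, dtype, device, requires_grad, size and stride. As a reading aid only (this is not Dynamo's own guard code), a minimal sketch of the properties that must hold for the compiled graph to be reused; the CPU tensors are stand-ins for the guarded device=0 (cuda:0) parameters so the sketch runs anywhere.

    import torch

    # Stand-ins for the guarded parameters of ...['31'].norm.linear.base_layer:
    # dtype, shape, stride and requires_grad all have to match the guard.
    weight = torch.empty(9216, 3072, dtype=torch.bfloat16)
    bias = torch.empty(9216, dtype=torch.bfloat16)

    assert weight.dtype == torch.bfloat16 and weight.requires_grad is False
    assert tuple(weight.shape) == (9216, 3072) and weight.stride() == (3072, 1)
    assert tuple(bias.shape) == (9216,) and bias.stride() == (1,)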
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters, 
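The guard comments in the block above quote the adapter path in peft/tuners/lora/layer.py (lines 497-509). Below is a minimal, self-contained sketch (not the PEFT implementation itself) of that forward pass, instantiated with the values these guards pin for single_transformer_blocks.31.norm.linear: a frozen 3072->9216 base linear in bfloat16, a trainable rank-16 adapter under the key 'default_0' with bias-free lora_A/lora_B, scaling == 1.0 and use_dora False. The nn.Identity dropout and the batch/sequence sizes are assumptions made only for illustration.

    import torch
    import torch.nn as nn

    base = nn.Linear(3072, 9216, bias=True, dtype=torch.bfloat16)    # base_layer: weight [9216, 3072], bias [9216]
    base.requires_grad_(False)                                       # guarded: base parameters have requires_grad=False
    lora_A = nn.Linear(3072, 16, bias=False, dtype=torch.bfloat16)   # lora_A['default_0']: weight [16, 3072], bias is None
    lora_B = nn.Linear(16, 9216, bias=False, dtype=torch.bfloat16)   # lora_B['default_0']: weight [9216, 16], bias is None
    dropout = nn.Identity()                                          # assumed no-op dropout for 'default_0'
    scaling = 1.0                                                    # guarded: scaling['default_0'] == 1.0

    def lora_forward(x: torch.Tensor) -> torch.Tensor:
        result = base(x)                       # result = self.base_layer(x, *args, **kwargs)
        x = x.to(lora_A.weight.dtype)          # x = x.to(lora_A.weight.dtype)
        # use_dora['default_0'] is guarded False, so only the plain LoRA branch runs:
        return result + lora_B(lora_A(dropout(x))) * scaling

    out = lora_forward(torch.randn(1, 512, 3072, dtype=torch.bfloat16))
    print(out.shape)  # torch.Size([1, 512, 9216])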
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
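Taken together, the entries for ...['31'].norm above describe the modulation layer that normalization.py:169/171 refer to: a SiLU, the LoRA-wrapped 3072->9216 linear sketched earlier, and a LayerNorm over 3072 features with eps 1e-06 and no affine parameters (both weight and bias are guarded against None). The reconstruction below is only a sketch consistent with those guards, not code taken from diffusers; in particular, the three-way chunk of the 9216-wide embedding into shift/scale/gate is an assumption, since the guards only quote lines 169 and 171.

    import torch
    import torch.nn as nn

    class AdaNormZeroSingleSketch(nn.Module):
        def __init__(self, dim: int = 3072):
            super().__init__()
            self.silu = nn.SiLU()                                              # guarded: inplace == False
            self.linear = nn.Linear(dim, 3 * dim)                              # guarded base shape [9216, 3072]; LoRA wrapper omitted
            self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)  # guarded: eps == 1e-06, weight/bias both None

        def forward(self, x: torch.Tensor, emb: torch.Tensor):
            emb = self.linear(self.silu(emb))                                  # normalization.py:169
            shift_msa, scale_msa, gate = emb.chunk(3, dim=1)                   # assumed split of the 3*dim embedding
            x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]   # normalization.py:171
            return x, gate

    norm = AdaNormZeroSingleSketch()
    x, gate = norm(torch.randn(2, 512, 3072), torch.randn(2, 3072))
    print(x.shape, gate.shape)  # torch.Size([2, 512, 3072]) torch.Size([2, 3072])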
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 
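The proj_mlp entries mirror the pattern already seen for norm.linear, only with a 3072->12288 base layer. A quick back-of-the-envelope check from the guarded shapes, assuming the single rank-16 'default_0' adapter (a 'default_1' adapter also appears in the guards, but its shapes are not pinned in this excerpt):

    # Parameter counts implied by the guarded shapes of ...['31'].proj_mlp
    base_params = 12288 * 3072 + 12288        # base_layer weight [12288, 3072] + bias [12288], requires_grad=False
    lora_params = 16 * 3072 + 12288 * 16      # lora_A [16, 3072] + lora_B [12288, 16], both bias-free, requires_grad=True
    print(base_params)                        # 37761024 frozen parameters
    print(lora_params)                        # 245760 trainable parameters
    print(lora_params / base_params)          # ~0.0065, i.e. the adapter is well under 1% of the base layer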
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'], 
accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].processor, 139846064955456) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['31']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['31']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['31']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['31']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=32 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[32] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[32] == '32' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 
'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].scaling['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._backward_hooks # hidden_states = gate * 
self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].processor, 139846064956224) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['32']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['32']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['32']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=33 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[33] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[33] == '33' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
+- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # 
peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].scaling['default_0'] == 1.0 # 
scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling 
= self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._backward_hooks, 
accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # 
diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # 
diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- 
KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].processor, 139846064956992) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['33']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['33']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['33']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=34 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[34] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[34] == '34' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING 
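Editor's note (sketch, not library source): the guards just above pin the norm.linear base layer of single transformer block '34' to a bfloat16 CUDA weight of shape [9216, 3072] with a [9216] bias, i.e. a projection from the 3072-wide hidden size to 3 * 3072. A minimal sketch of the adaptive-norm path these guards cover follows; the class name AdaNormSingleSketch and the chunk(3) split into shift/scale/gate are illustrative assumptions (consistent with the 9216 = 3 * 3072 weight shape and with the quoted lines normalization.py:169, normalization.py:171 and transformer_flux.py:88), not the diffusers implementation itself.

import torch
import torch.nn as nn

# Sketch only: mirrors the guarded shapes (dim=3072, linear out 3*dim=9216,
# LayerNorm eps=1e-06 whose weight/bias are guarded as None further down,
# consistent with elementwise_affine=False). Not the diffusers source.
class AdaNormSingleSketch(nn.Module):
    def __init__(self, dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(dim, 3 * dim)          # guarded weight [9216, 3072], bias [9216]
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)

    def forward(self, x: torch.Tensor, emb: torch.Tensor):
        emb = self.linear(self.silu(emb))              # normalization.py:169
        # Assumed split of the 9216-wide embedding into three 3072-wide chunks.
        shift_msa, scale_msa, gate_msa = emb.chunk(3, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # normalization.py:171
        return x, gate_msa                             # unpacked as (norm_hidden_states, gate) in transformer_flux.py:88
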
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward 
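Editor's note (sketch, not library source): the repeated guard blocks around lora_A / lora_B / lora_dropout / scaling / use_dora come from tracing the PEFT LoRA Linear forward quoted in the guard comments (peft/tuners/lora/layer.py:488-509). Dynamo specializes on the adapter configuration it saw here ('default_0' active, scaling 1.0, merged_adapters empty, and what appears to be the non-DoRA branch), so changing any of these at runtime fails the guards and triggers recompilation. Below is a minimal Python paraphrase of that control flow; attribute names follow the quoted lines, but this is not the PEFT implementation itself (it drops *args/**kwargs and the merged/disabled/DoRA branches).

import torch

def lora_linear_forward_sketch(layer, x: torch.Tensor) -> torch.Tensor:
    # Paraphrase of the guarded path in peft/tuners/lora/layer.py:488-509.
    # `layer` is assumed to expose the attributes the guards check above:
    # base_layer, lora_A / lora_B / lora_dropout (ModuleDicts), scaling,
    # use_dora, active_adapters.
    result = layer.base_layer(x)                        # layer.py:497
    for active_adapter in layer.active_adapters:        # layer.py:499, guarded == ['default_0']
        if active_adapter not in layer.lora_A.keys():   # layer.py:500
            continue
        lora_A = layer.lora_A[active_adapter]
        lora_B = layer.lora_B[active_adapter]           # layer.py:503
        dropout = layer.lora_dropout[active_adapter]    # layer.py:504
        scaling = layer.scaling[active_adapter]         # layer.py:505, guarded == 1.0
        x_cast = x.to(lora_A.weight.dtype)              # layer.py:506
        if not layer.use_dora[active_adapter]:          # layer.py:508, the trace takes this branch
            result = result + lora_B(lora_A(dropout(x_cast))) * scaling  # layer.py:509
    return result
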
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].scaling['default_0'], 
accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | 
| | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].use_dora, 
accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'], 
accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- 
TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | 
| | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | 
| | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # 
peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in 
__call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- 
DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = 
self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor 
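The guard entries above and below all come from tracing the PEFT LoRA wrappers around attn.to_q / to_k / to_v: Dynamo installs one guard per Python attribute the LoRA forward reads (base_layer, the lora_A / lora_B module dicts keyed by 'default_0' and 'default_1', lora_dropout, scaling, use_dora, active_adapters, merged_adapters, disable_adapters), plus TENSOR_MATCH guards on the rank-16 adapter weights. The snippet below is a minimal, self-contained sketch of that forward path under the shapes shown in the log (3072-dim linear, rank 16, single active adapter); it is not the actual peft implementation, and any class or attribute name not quoted in the guard comments (e.g. LoraLinearSketch) is illustrative only.

```python
# Minimal sketch, not peft's real code: a LoRA-wrapped linear layer whose
# forward touches the same attributes that appear as Dynamo guards above
# (guard comments cite peft/tuners/lora/layer.py:488-509).
import torch
import torch.nn as nn


class LoraLinearSketch(nn.Module):
    def __init__(self, base: nn.Linear, rank: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base
        # ModuleDicts keyed by adapter name, as in the guarded lora_A/_B/_dropout dicts.
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, rank, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(rank, base.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.scaling = {adapter: 1.0}          # guarded via EQUALS_MATCH == 1.0
        self.use_dora = {adapter: False}       # guarded via ID_MATCH on False
        self.active_adapters = [adapter]       # guarded length-1 list == ['default_0']
        self.disable_adapters = False
        self.merged_adapters = []              # guarded empty list (not merged)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Base projection first, then the low-rank update, mirroring
        # result = result + lora_B(lora_A(dropout(x))) * scaling.
        result = self.base_layer(x)
        if self.disable_adapters or self.merged_adapters:
            return result
        for active_adapter in self.active_adapters:
            if active_adapter not in self.lora_A.keys():
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
            x = x.to(lora_A.weight.dtype)
            if not self.use_dora[active_adapter]:
                result = result + lora_B(lora_A(dropout(x))) * scaling
        return result


if __name__ == "__main__":
    # Shapes match the guarded tensors: base weight [3072, 3072],
    # lora_A weight [16, 3072], lora_B weight [3072, 16].
    layer = LoraLinearSketch(nn.Linear(3072, 3072), rank=16)
    out = layer(torch.randn(2, 3072))
    print(out.shape)  # torch.Size([2, 3072])
```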
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, 
ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | 
| | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], 
stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].processor, 139846064957760) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['34']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ 
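Note (annotation, not part of the original log): the guard subtree above pins the internals of the PEFT LoRA wrappers around attn.to_k / attn.to_v in single_transformer_blocks['34'] -- the adapter dicts (lora_A, lora_B, lora_dropout, scaling, use_dora), the active-adapter list ('default_0'), the empty merged_adapters list, and the parameter tensors themselves (TENSOR_MATCH on torch.bfloat16, device=0, sizes [3072, 3072] / [16, 3072] / [3072, 16] and their requires_grad flags). Every guard cites peft/tuners/lora/layer.py:497-509 as its source. Purely as a reading aid, here is a minimal, self-contained sketch of the forward path those citations describe; the class and shapes below are hypothetical illustrations, not the actual peft implementation.

import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    # Hypothetical stand-in for the guarded peft LoRA Linear wrapper.
    # Attribute names mirror the guard sources above; line comments point
    # at the source lines cited in the guard log.
    def __init__(self, base: nn.Linear, rank: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base.in_features, rank, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(rank, base.out_features, bias=False)})
        self.scaling = {adapter: 1.0}       # guarded via EQUALS_MATCH == 1.0
        self.use_dora = {adapter: False}    # guarded via ID_MATCH (layer.py:508)
        self._active_adapter = [adapter]    # guarded via EQUALS_MATCH == 'default_0'
        self.merged_adapters = []           # guarded via LENGTH_CHECK (empty)

    def forward(self, x):
        result = self.base_layer(x)                          # layer.py:497
        for active_adapter in self._active_adapter:          # layer.py:499
            if active_adapter not in self.lora_A.keys():     # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]              # layer.py:503
            dropout = self.lora_dropout[active_adapter]       # layer.py:504
            scaling = self.scaling[active_adapter]            # layer.py:505
            x = x.to(lora_A.weight.dtype)                     # layer.py:506
            if not self.use_dora[active_adapter]:             # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

# Hypothetical usage with shapes matching the guarded to_v tensors:
# layer = LoraLinearSketch(nn.Linear(3072, 3072).to(torch.bfloat16), rank=16)
# y = layer(torch.randn(1, 4096, 3072, dtype=torch.bfloat16))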
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['34']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['34']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=35 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[35] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[35] == '35' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: 
len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) 
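The [__guards] records around this point walk the PEFT-wrapped Linear modules of single_transformer_blocks.35 attribute by attribute: TYPE_MATCH and DICT_LENGTH guards on the lora_A / lora_B / lora_dropout ModuleDicts and their key order, EQUALS_MATCH on the active adapter name ('default_0') and its scaling value (1.0), ID_MATCH on the use_dora and _disable_adapters booleans and on the None biases, and TENSOR_MATCH on the frozen bf16 base weights plus the trainable rank-16 LoRA factors. Output of this kind typically comes from running torch.compile with guard logging enabled (e.g. TORCH_LOGS="+guards" or torch._logging.set_logs(guards=True)); changing any of the guarded state, for instance activating 'default_1', merging an adapter, or flipping use_dora, invalidates this [0/2] graph and forces a recompile. As a rough orientation only, the sketch below is not the actual peft implementation; it mirrors the forward path these guards specialize on, following the source lines quoted in the log (peft/tuners/lora/layer.py:488-509), and the class name and constructor defaults are illustrative assumptions.

# Hedged sketch, assuming a single active adapter named 'default_0':
# a minimal LoRA-wrapped Linear whose attributes correspond to the guarded state above.
import torch
import torch.nn as nn

class LoraLinearSketch(nn.Module):
    def __init__(self, base_layer: nn.Linear, adapter_name: str = "default_0",
                 r: int = 16, lora_alpha: int = 16, lora_dropout: float = 0.0):
        super().__init__()
        self.base_layer = base_layer                      # frozen weight/bias -> TENSOR_MATCH guards
        self.lora_A = nn.ModuleDict({adapter_name: nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter_name: nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict(
            {adapter_name: nn.Dropout(lora_dropout) if lora_dropout > 0 else nn.Identity()})
        self.scaling = {adapter_name: lora_alpha / r}     # scaling['default_0'] == 1.0 in the log
        self.use_dora = {adapter_name: False}             # guarded via ID_MATCH on the bool
        self.merged_adapters = []                         # empty list -> "merged" is False
        self._disable_adapters = False                    # ID_MATCH against False
        self._active_adapter = [adapter_name]             # EQUALS_MATCH on 'default_0'

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        result = self.base_layer(x)                       # layer.py:497
        if self._disable_adapters or self.merged_adapters:
            return result
        for active_adapter in self._active_adapter:       # layer.py:499
            if active_adapter not in self.lora_A.keys():  # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]          # layer.py:503
            dropout = self.lora_dropout[active_adapter]   # layer.py:504
            scaling = self.scaling[active_adapter]        # layer.py:505
            x = x.to(lora_A.weight.dtype)                 # layer.py:506
            if not self.use_dora[active_adapter]:         # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result

# Illustrative usage with the proj_mlp shape seen in the guards (3072 -> 12288):
base = nn.Linear(3072, 12288, bias=True)
wrapped = LoraLinearSketch(base)
y = wrapped(torch.randn(2, 3072))

Because every dict length, key string, scaling value and hook table shown here is guarded, keeping the adapter configuration static between calls is what allows Dynamo to keep reusing this compiled graph instead of re-tracing.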
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = 
self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
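The guard comments in this stretch of the tree repeatedly quote the PEFT LoRA forward path (peft/tuners/lora/layer.py:488-509: `result = self.base_layer(x, *args, **kwargs)`, `scaling = self.scaling[active_adapter]`, `result = result + lora_B(lora_A(dropout(x))) * scaling`, ...). The minimal sketch below is reconstructed only from those quoted lines to show why Dynamo ends up guarding `scaling`, `lora_dropout`, `lora_A`/`lora_B`, `use_dora`, `merged_adapters`, the active-adapter list, and the base layer's weight/bias tensors; the class scaffolding, constructor, and the `nn.Identity` dropout stand-in are assumptions for illustration, not PEFT's actual implementation.

```python
# Hedged sketch (not PEFT's real class) of the LoRA Linear forward that the guards trace.
import torch
import torch.nn as nn


class LoraLinearSketch(nn.Module):
    def __init__(self, base_layer: nn.Linear, r: int = 16, adapter: str = "default_0"):
        super().__init__()
        self.base_layer = base_layer                         # guarded: base_layer._parameters (TENSOR_MATCH)
        self.lora_A = nn.ModuleDict({adapter: nn.Linear(base_layer.in_features, r, bias=False)})
        self.lora_B = nn.ModuleDict({adapter: nn.Linear(r, base_layer.out_features, bias=False)})
        self.lora_dropout = nn.ModuleDict({adapter: nn.Identity()})  # stand-in for the guarded dropout module
        self.scaling = {adapter: 1.0}                        # guarded: EQUALS_MATCH scaling['default_0'] == 1.0
        self.use_dora = {adapter: False}                     # guarded: ID_MATCH on use_dora['default_0']
        self.merged_adapters = []                            # guarded: LENGTH_CHECK (empty list)
        self.disable_adapters = False                        # guarded: ID_MATCH on _disable_adapters
        self.active_adapters = [adapter]                     # guarded: _active_adapter[0] == 'default_0'

    def forward(self, x, *args, **kwargs):
        result = self.base_layer(x, *args, **kwargs)         # layer.py:497
        for active_adapter in self.active_adapters:          # layer.py:499
            if active_adapter not in self.lora_A.keys():     # layer.py:500
                continue
            lora_A = self.lora_A[active_adapter]             # adapter lookup
            lora_B = self.lora_B[active_adapter]             # layer.py:503
            dropout = self.lora_dropout[active_adapter]      # layer.py:504
            scaling = self.scaling[active_adapter]           # layer.py:505
            x = x.to(lora_A.weight.dtype)                    # layer.py:506
            if not self.use_dora[active_adapter]:            # layer.py:508
                result = result + lora_B(lora_A(dropout(x))) * scaling  # layer.py:509
        return result


# Mirroring the guarded proj_out shapes above: base weight [3072, 15360], lora_A [16, 15360], lora_B [3072, 16].
layer = LoraLinearSketch(nn.Linear(15360, 3072, bias=True), r=16)
out = layer(torch.randn(1, 15360))
```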
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
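Note: the TENSOR_MATCH entries pin concrete tensor metadata. check_tensor is internal to Dynamo, but roughly speaking the guard on the to_k lora_A 'default_0' weight only passes while the properties recorded in the log hold. A hypothetical helper illustrating what is being compared (not the real guard implementation):

    import torch

    def matches_recorded_guard(t: torch.Tensor) -> bool:
        # Properties recorded for that weight in the TENSOR_MATCH above:
        # Parameter, torch.bfloat16, device=0 (cuda:0), requires_grad=True,
        # size=[16, 3072], stride=[3072, 1].
        return (
            isinstance(t, torch.nn.Parameter)
            and t.dtype == torch.bfloat16
            and t.device == torch.device("cuda", 0)
            and t.requires_grad
            and tuple(t.shape) == (16, 3072)
            and tuple(t.stride()) == (3072, 1)
        )

If any of these change between calls (for example the adapter is cast to another dtype, moved off the GPU, or frozen), the guard fails and Dynamo recompiles the frame, which is reflected in the second number of the [0/2] index in the log prefix.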
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] 
| | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].processor, 139846064958528) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['35']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['35']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['35']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=36 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[36] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[36] == '36' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: 
len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | 
| | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].merged_adapters, 
7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'], 
accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) 
# peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- 
NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._backward_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # 
peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: 
check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | 
| | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, 
requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # 
dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | 
| | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].processor, 139846064959296) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['36']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not 
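Editor's note: the TYPE_MATCH plus ID_MATCH pair on attn.processor above follows from the quoted diffusers line that inspects the processor's call signature before dispatching to it; once compiled code depends on the signature of one particular object, Dynamo pins that object's identity. A small illustration of the pattern (the dispatch helper is hypothetical, not the diffusers API):

import inspect

def dispatch(processor, **kwargs):
    # Filtering kwargs against the processor's signature, as the quoted
    # attention_processor.py:479 line does, ties the traced code to this exact
    # processor object -- hence the TYPE_MATCH and ID_MATCH guards above.
    accepted = set(inspect.signature(processor.__call__).parameters.keys())
    return processor(**{k: v for k, v in kwargs.items() if k in accepted})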
L['self']._modules['single_transformer_blocks']._modules['36']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['36']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['36']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- KeyValueManager pair at index=37 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules.keys())[37] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules.keys())[37] == '37' # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37'], 247974224) # for index_block, block in enumerate(self.single_transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37'].__dict__) # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules) == 5 # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 
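Editor's note: the KeyValueManager pair at index=37 comes from Dynamo tracing the plain Python loop over the ModuleList quoted in the source comment (transformer_flux.py:509); every key ('0' through '37') and every block gets its own guard subtree, which is why this dump is so deep. A hedged toy reproduction of the pattern (names and sizes are placeholders, not the diffusers model):

import torch
import torch.nn as nn

class TinyStack(nn.Module):
    def __init__(self, n_blocks: int = 38, dim: int = 8):
        super().__init__()
        self.single_transformer_blocks = nn.ModuleList(nn.Linear(dim, dim) for _ in range(n_blocks))

    def forward(self, hidden_states):
        # Iterating the ModuleList in Python is what produces one guarded
        # key/value pair per block in the compiled artifact's guard tree.
        for index_block, block in enumerate(self.single_transformer_blocks):
            hidden_states = block(hidden_states)
        return hidden_states

compiled = torch.compile(TinyStack())
out = compiled(torch.randn(2, 8))   # the resulting guard tree enumerates all 38 keys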
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'], 99413712) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm'].__dict__) # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules) == 3 # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'], 96881248) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'], 244529984) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].__dict__) # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['base_layer']._parameters['bias'], 
Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[9216], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_A']._modules.keys())[1] == 
'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[9216, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: 
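Editor's note: the TENSOR_MATCH guards in this stretch record the shapes for block 37's norm.linear: a frozen bf16 base projection of size [9216, 3072] with a [9216] bias (guarded earlier), plus trainable rank-16 adapters lora_A [16, 3072] and lora_B [9216, 16] with no bias. A quick parameter-count check implied by those guarded shapes (the arithmetic is the editor's, not part of the log):

# Parameter counts implied by the guarded shapes (illustrative arithmetic only).
base = 9216 * 3072 + 9216          # frozen base weight + bias  -> 28,320,768
lora = 16 * 3072 + 9216 * 16       # lora_A + lora_B (no bias)  ->    196,608
print(base, lora, f"{lora / base:.2%}")   # the LoRA pair adds roughly 0.7% extra parameters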
len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._backward_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['linear']._backward_pre_hooks # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | 
| | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None] # diffusers/src/diffusers/models/normalization.py:171 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: 
___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._parameters # emb = self.linear(self.silu(emb)) # diffusers/src/diffusers/models/normalization.py:169 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: 
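Editor's note: the guards on norm.norm pin a LayerNorm with eps == 1e-06 and normalized_shape == (3072,), and the ID_MATCH entries on its weight and bias against object id 7580768 are identity checks, most likely against None in this process (i.e. elementwise_affine=False), matching the bias=None checks on the LoRA layers. The surrounding source comments quote the shift/scale modulation from diffusers normalization.py:169/171. A hedged sketch of that pattern, with assumed structure and parameter names rather than the actual diffusers class:

import torch
import torch.nn as nn

class AdaLayerNormZeroSingleSketch(nn.Module):
    """Rough sketch of the modulation quoted in the guard comments; assumptions only."""
    def __init__(self, dim: int = 3072):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(dim, 3 * dim)   # matches the guarded [9216, 3072] base weight
        self.norm = nn.LayerNorm(dim, eps=1e-6, elementwise_affine=False)  # weight/bias are None

    def forward(self, x, emb):
        emb = self.linear(self.silu(emb))                      # normalization.py:169
        shift_msa, scale_msa, gate = emb.chunk(3, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]  # normalization.py:171
        return x, gate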
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['norm']._backward_pre_hooks # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'], accessed_by=DictGetItemGuardAccessor(proj_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'], 244529984) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[12288], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[12288, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | 
| | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._backward_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # 
diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_mlp']._backward_pre_hooks # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'], accessed_by=DictGetItemGuardAccessor(act_mlp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'], 96844336) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].__dict__) # mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) # diffusers/src/diffusers/models/transformers/transformer_flux.py:89 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].approximate, accessed_by=DictGetItemGuardAccessor(approximate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['act_mlp'].approximate == 'tanh' # return F.gelu(input, approximate=self.approximate) # nn/modules/activation.py:734 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'], 244529984) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].__dict__) # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].scaling['default_1'], 
accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 15360], stride=[15360, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 15360], stride=[15360, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # 
nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._backward_hooks # hidden_states = gate * 
self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['proj_out']._backward_pre_hooks # hidden_states = gate * self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:98 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'], accessed_by=DictGetItemGuardAccessor(attn) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'], 239601328) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', 
L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__dict__) # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].heads, accessed_by=DictGetItemGuardAccessor(heads) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].heads == 24 # head_dim = inner_dim // attn.heads # diffusers/src/diffusers/models/attention_processor.py:1721 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules) == 5 # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'], accessed_by=DictGetItemGuardAccessor(norm_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'], 99397456) # if attn.norm_q is not None: # diffusers/src/diffusers/models/attention_processor.py:1727 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].__dict__) # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters, 
accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_q']._backward_pre_hooks # query = attn.norm_q(query) # diffusers/src/diffusers/models/attention_processor.py:1728 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'], accessed_by=DictGetItemGuardAccessor(norm_k) V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'], 99397456) # if attn.norm_k is not None: # diffusers/src/diffusers/models/attention_processor.py:1729 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].__dict__) # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k'].eps == 1e-06 # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters) == 1 # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[128], stride=[1]) # if self.weight is not None: # diffusers/src/diffusers/models/normalization.py:430 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['norm_k']._backward_pre_hooks # key = attn.norm_k(key) # diffusers/src/diffusers/models/attention_processor.py:1730 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'], accessed_by=DictGetItemGuardAccessor(to_q) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'], 244529984) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].__dict__) # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules) == 2 # return 
self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: 
source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 
in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | 
| | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- 
DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._backward_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | 
| | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_q']._backward_pre_hooks # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'], accessed_by=DictGetItemGuardAccessor(to_k) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'], 244529984) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].__dict__) # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | 
| | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] 
[__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, 
self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 
torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._backward_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_k']._backward_pre_hooks # key = attn.to_k(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1717 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'], accessed_by=DictGetItemGuardAccessor(to_v) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'], 244529984) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].__dict__) # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].scaling, accessed_by=DictGetItemGuardAccessor(scaling) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].scaling) == 2 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].scaling['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | 
| | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].scaling['default_0'] == 1.0 # scaling = self.scaling[active_adapter] # peft/tuners/lora/layer.py:505 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].scaling['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules) == 6 # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer'], accessed_by=DictGetItemGuardAccessor(base_layer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer'], 97167728) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer'].__dict__) # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['base_layer']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[3072], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout'], accessed_by=DictGetItemGuardAccessor(lora_dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout'], 96865328) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'], 97271760) # dropout = self.lora_dropout[active_adapter] # peft/tuners/lora/layer.py:504 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_dropout']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A'], accessed_by=DictGetItemGuardAccessor(lora_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A'], 96865328) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- DictGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=0 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: 
list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[0] == 'default_0' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- ValueManager: GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'], 97167728) # if active_adapter not in self.lora_A.keys(): # peft/tuners/lora/layer.py:500 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters) == 2 # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[16, 3072], stride=[3072, 1]) # x = x.to(lora_A.weight.dtype) # peft/tuners/lora/layer.py:506 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- 
GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- KeyValueManager pair at index=1 V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- KeyManager: GuardManager: source=list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: list(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_A']._modules.keys())[1] == 'default_1' # return self._modules.keys() # nn/modules/container.py:539 in keys V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B'], accessed_by=DictGetItemGuardAccessor(lora_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B'], 96865328) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules) == 2 # return self._modules[key] # nn/modules/container.py:502 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- TYPE_MATCH: 
___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'], 97167728) # lora_B = self.lora_B[active_adapter] # peft/tuners/lora/layer.py:503 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0'].__dict__) # result = result + lora_B(lora_A(dropout(x))) * scaling # peft/tuners/lora/layer.py:509 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=True, size=[3072, 16], stride=[16, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_0']._parameters['bias'], 7580768) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 
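Each TENSOR_MATCH in this dump freezes a parameter's class, dispatch keys, dtype, device, requires_grad, size and stride, so loading an adapter with a different rank or dtype into the same module would fail these guards and force a recompile on the next call. A short sketch of how a guard dump like this one is typically surfaced on PyTorch 2.x; the exact artifact names are an assumption based on the [__guards] tag in these lines:

    import torch

    # Print the guard tree and recompilation reasons for compiled functions.
    # Environment-variable form: TORCH_LOGS="guards,recompiles"
    torch._logging.set_logs(guards=True, recompiles=True)

    @torch.compile
    def f(x):
        return x * 2

    f(torch.randn(8))       # first call: compile and emit a TREE_GUARD_MANAGER dump like the above
    f(torch.randn(8, 4))    # a different tensor rank fails the tensor guards -> recompile is logged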
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_B']._modules['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_embedding_A'], accessed_by=DictGetItemGuardAccessor(lora_embedding_A) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._modules['lora_embedding_B'], accessed_by=DictGetItemGuardAccessor(lora_embedding_B) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].use_dora, accessed_by=DictGetItemGuardAccessor(use_dora) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].use_dora) == 2 # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].use_dora['default_0'], accessed_by=DictGetItemGuardAccessor(default_0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].use_dora['default_0'], 7629920) # if not self.use_dora[active_adapter]: # peft/tuners/lora/layer.py:508 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].use_dora['default_1'], accessed_by=DictGetItemGuardAccessor(default_1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._parameters # result = self.base_layer(x, *args, **kwargs) # peft/tuners/lora/layer.py:497 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: 
source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._active_adapter, accessed_by=DictGetItemGuardAccessor(_active_adapter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._active_adapter, 7593792) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._active_adapter) == 1 # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._active_adapter[0], accessed_by=ListGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._active_adapter[0] == 'default_0' # for active_adapter in self.active_adapters: # peft/tuners/lora/layer.py:499 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._backward_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].merged_adapters, accessed_by=DictGetItemGuardAccessor(merged_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].merged_adapters, 7593792) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- LENGTH_CHECK: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v'].merged_adapters # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._disable_adapters, accessed_by=DictGetItemGuardAccessor(_disable_adapters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- 
ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._disable_adapters, 7629920) # if self.disable_adapters: # peft/tuners/lora/layer.py:488 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._modules['to_v']._backward_pre_hooks # value = attn.to_v(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1718 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].processor, accessed_by=DictGetItemGuardAccessor(processor) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].processor, 239395136) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].processor, 139846064755328) # return self.processor( # diffusers/src/diffusers/models/attention_processor.py:490 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._parameters # query = attn.to_q(hidden_states) # diffusers/src/diffusers/models/attention_processor.py:1716 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not 
L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn']._backward_pre_hooks # attn_output = self.attn( # diffusers/src/diffusers/models/transformers/transformer_flux.py:91 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__, accessed_by=GetAttrGuardAccessor(__class__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward, accessed_by=GetAttrGuardAccessor(forward) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['single_transformer_blocks']._modules['37']._modules['attn'].__class__.forward.__defaults__[0], 7580768) # batch_size, _, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape # diffusers/src/diffusers/models/attention_processor.py:1713 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._parameters # norm_hidden_states, gate = self.norm(hidden_states, emb=temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:88 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | 
| | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._backward_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DictSubclassGuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['single_transformer_blocks']._modules['37']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- DICT_LENGTH: not L['self']._modules['single_transformer_blocks']._modules['37']._backward_pre_hooks # hidden_states = block( # diffusers/src/diffusers/models/transformers/transformer_flux.py:531 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=L['self']._modules['norm_out'], accessed_by=DictGetItemGuardAccessor(norm_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out'], 99394624) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=L['self']._modules['norm_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['norm_out'].__dict__) # hidden_states = self.norm_out(hidden_states, temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:548 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules, accessed_by=DictGetItemGuardAccessor(_modules) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['norm_out']._modules) == 3 # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['silu'], accessed_by=DictGetItemGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['silu'], 96881248) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['silu'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DICT_CONTAINS: not 
___dict_contains('forward', L['self']._modules['norm_out']._modules['silu'].__dict__) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['silu'].inplace, accessed_by=DictGetItemGuardAccessor(inplace) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['norm_out']._modules['silu'].inplace, 7629920) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear'], accessed_by=DictGetItemGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['linear'], 97167728) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['norm_out']._modules['linear'].__dict__) # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['norm_out']._modules['linear']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['norm_out']._modules['linear']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[6144, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['linear']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['norm_out']._modules['linear']._parameters['bias'], Parameter, 
DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[6144], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'], accessed_by=DictGetItemGuardAccessor(norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['norm'], 98072640) # x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] # diffusers/src/diffusers/models/normalization.py:306 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['norm_out']._modules['norm'].__dict__) # x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] # diffusers/src/diffusers/models/normalization.py:306 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].eps, accessed_by=DictGetItemGuardAccessor(eps) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['norm_out']._modules['norm'].eps == 1e-06 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- DICT_LENGTH: len(L['self']._modules['norm_out']._modules['norm']._parameters) == 2 # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['norm_out']._modules['norm']._parameters['weight'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- ID_MATCH: ___check_obj_id(L['self']._modules['norm_out']._modules['norm']._parameters['bias'], 7580768) # input, self.normalized_shape, self.weight, self.bias, self.eps # nn/modules/normalization.py:218 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | 
| | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].normalized_shape, accessed_by=DictGetItemGuardAccessor(normalized_shape) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['norm_out']._modules['norm'].normalized_shape, 7569792) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- LENGTH_CHECK: len(L['self']._modules['norm_out']._modules['norm'].normalized_shape) == 1 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | +- GuardManager: source=L['self']._modules['norm_out']._modules['norm'].normalized_shape[0], accessed_by=TupleGetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | | | | +- EQUALS_MATCH: L['self']._modules['norm_out']._modules['norm'].normalized_shape[0] == 3072 # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['norm_out']._parameters # emb = self.linear(self.silu(conditioning_embedding).to(x.dtype)) # diffusers/src/diffusers/models/normalization.py:304 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['norm_out']._forward_hooks, accessed_by=DictGetItemGuardAccessor(_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._backward_hooks, accessed_by=DictGetItemGuardAccessor(_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['norm_out']._backward_hooks # hidden_states = self.norm_out(hidden_states, temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:548 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DictSubclassGuardManager: source=L['self']._modules['norm_out']._forward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['norm_out']._backward_pre_hooks, accessed_by=DictGetItemGuardAccessor(_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: not L['self']._modules['norm_out']._backward_pre_hooks # hidden_states = self.norm_out(hidden_states, temb) # diffusers/src/diffusers/models/transformers/transformer_flux.py:548 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=L['self']._modules['proj_out'], accessed_by=DictGetItemGuardAccessor(proj_out) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- TYPE_MATCH: ___check_type_id(L['self']._modules['proj_out'], 97167728) # if name in modules: # nn/modules/module.py:1913 in __getattr__ V0828 
05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=L['self']._modules['proj_out'].__dict__, accessed_by=GetGenericDictGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- DICT_CONTAINS: not ___dict_contains('forward', L['self']._modules['proj_out'].__dict__) # output = self.proj_out(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:549 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=L['self']._modules['proj_out']._parameters, accessed_by=DictGetItemGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- DICT_LENGTH: len(L['self']._modules['proj_out']._parameters) == 2 # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['proj_out']._parameters['weight'], accessed_by=DictGetItemGuardAccessor(weight) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['proj_out']._parameters['weight'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[64, 3072], stride=[3072, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- GuardManager: source=L['self']._modules['proj_out']._parameters['bias'], accessed_by=DictGetItemGuardAccessor(bias) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- TENSOR_MATCH: check_tensor(L['self']._modules['proj_out']._parameters['bias'], Parameter, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[64], stride=[1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=L['self'].training, accessed_by=GetAttrGuardAccessor(training) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(L['self'].training, 7629920) # if self.training and self.gradient_checkpointing: # diffusers/src/diffusers/models/transformers/transformer_flux.py:472 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=L['self']._parameters, accessed_by=GetAttrGuardAccessor(_parameters) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- DICT_LENGTH: not L['self']._parameters # _parameters = self.__dict__["_parameters"] # nn/modules/module.py:1904 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=L['self']._internal_dict, accessed_by=GetAttrGuardAccessor(_internal_dict) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- TYPE_MATCH: 
___check_type_id(L['self']._internal_dict, 221665040) # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'norm_out') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'proj_out') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'pos_embed') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'x_embedder') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'time_text_embed') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'context_embedder') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'transformer_blocks') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- NO_HASATTR: not hasattr(L['self']._internal_dict, 'single_transformer_blocks') # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['img_ids'], accessed_by=DictGetItemGuardAccessor(img_ids) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- TENSOR_MATCH: check_tensor(L['img_ids'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[4096, 3], stride=[3, 1]) # if img_ids.ndim == 3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:462 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_HASATTR: hasattr(L['img_ids'], '_dynamo_dynamic_indices') == False # if img_ids.ndim == 
3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:462 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['txt_ids'], accessed_by=DictGetItemGuardAccessor(txt_ids) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- TENSOR_MATCH: check_tensor(L['txt_ids'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[512, 3], stride=[3, 1]) # if txt_ids.ndim == 3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:456 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_HASATTR: hasattr(L['txt_ids'], '_dynamo_dynamic_indices') == False # if txt_ids.ndim == 3: # diffusers/src/diffusers/models/transformers/transformer_flux.py:456 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['guidance'], accessed_by=DictGetItemGuardAccessor(guidance) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- TENSOR_MATCH: check_tensor(L['guidance'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.float32, device=0, requires_grad=False, size=[1], stride=[1]) # if guidance is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:445 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_HASATTR: hasattr(L['guidance'], '_dynamo_dynamic_indices') == False # if guidance is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:445 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['timestep'], accessed_by=DictGetItemGuardAccessor(timestep) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- TENSOR_MATCH: check_tensor(L['timestep'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[1], stride=[1]) # timestep = timestep.to(hidden_states.dtype) * 1000 # diffusers/src/diffusers/models/transformers/transformer_flux.py:444 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_HASATTR: hasattr(L['timestep'], '_dynamo_dynamic_indices') == False # timestep = timestep.to(hidden_states.dtype) * 1000 # diffusers/src/diffusers/models/transformers/transformer_flux.py:444 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['return_dict'], accessed_by=DictGetItemGuardAccessor(return_dict) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- ID_MATCH: ___check_obj_id(L['return_dict'], 7629920) # if not return_dict: # diffusers/src/diffusers/models/transformers/transformer_flux.py:555 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['hidden_states'], accessed_by=DictGetItemGuardAccessor(hidden_states) 
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- TENSOR_MATCH: check_tensor(L['hidden_states'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[1, 4096, 64], stride=[262144, 64, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_HASATTR: hasattr(L['hidden_states'], '_dynamo_dynamic_indices') == False # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['pooled_projections'], accessed_by=DictGetItemGuardAccessor(pooled_projections) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- TENSOR_MATCH: check_tensor(L['pooled_projections'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[1, 768], stride=[768, 1]) # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_HASATTR: hasattr(L['pooled_projections'], '_dynamo_dynamic_indices') == False # timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) # (N, D) # diffusers/src/diffusers/models/embeddings.py:992 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['encoder_hidden_states'], accessed_by=DictGetItemGuardAccessor(encoder_hidden_states) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- TENSOR_MATCH: check_tensor(L['encoder_hidden_states'], Tensor, DispatchKeySet(CUDA, BackendSelect, ADInplaceOrView, AutogradCUDA), torch.bfloat16, device=0, requires_grad=False, size=[1, 512, 4096], stride=[2097152, 4096, 1]) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_HASATTR: hasattr(L['encoder_hidden_states'], '_dynamo_dynamic_indices') == False # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- NO_TENSOR_ALIASING V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['joint_attention_kwargs'], accessed_by=DictGetItemGuardAccessor(joint_attention_kwargs) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- ID_MATCH: ___check_obj_id(L['joint_attention_kwargs'], 7580768) # if joint_attention_kwargs is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['controlnet_block_samples'], accessed_by=DictGetItemGuardAccessor(controlnet_block_samples) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- ID_MATCH: ___check_obj_id(L['controlnet_block_samples'], 
7580768) # if controlnet_block_samples is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:502 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=L['controlnet_single_block_samples'], accessed_by=DictGetItemGuardAccessor(controlnet_single_block_samples) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- ID_MATCH: ___check_obj_id(L['controlnet_single_block_samples'], 7580768) # if controlnet_single_block_samples is not None: # diffusers/src/diffusers/models/transformers/transformer_flux.py:538 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | +- GuardManager: source=G, accessed_by=GlobalsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['USE_PEFT_BACKEND'], accessed_by=DictGetItemGuardAccessor(USE_PEFT_BACKEND) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['USE_PEFT_BACKEND'], 7629952) # if USE_PEFT_BACKEND: # diffusers/src/diffusers/models/transformers/transformer_flux.py:434 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['scale_lora_layers'], accessed_by=DictGetItemGuardAccessor(scale_lora_layers) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['scale_lora_layers'].__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['scale_lora_layers'].__code__, 139856000130016) # scale_lora_layers(self, lora_scale) # diffusers/src/diffusers/models/transformers/transformer_flux.py:436 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['unscale_lora_layers'], accessed_by=DictGetItemGuardAccessor(unscale_lora_layers) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['unscale_lora_layers'].__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['unscale_lora_layers'].__code__, 139856000130192) # unscale_lora_layers(self, lora_scale) # diffusers/src/diffusers/models/transformers/transformer_flux.py:553 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__builtins_dict___4'], accessed_by=DictGetItemGuardAccessor(__builtins_dict___4) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['int'], accessed_by=DictGetItemGuardAccessor(int) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['int'], 7592000) # if isinstance(pos, int): # diffusers/src/diffusers/models/embeddings.py:547 in get_1d_rotary_pos_embed V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['len'], accessed_by=DictGetItemGuardAccessor(len) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['len'], 139859477898240) # assert len(timesteps.shape) == 1, "Timesteps should be a 
1d-array" # diffusers/src/diffusers/models/embeddings.py:54 in get_timestep_embedding V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['set'], accessed_by=DictGetItemGuardAccessor(set) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['set'], 7574816) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['str'], accessed_by=DictGetItemGuardAccessor(str) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['str'], 7556320) # if isinstance(self.active_adapter, str): # peft/tuners/tuners_utils.py:469 in active_adapters V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['bool'], accessed_by=DictGetItemGuardAccessor(bool) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['bool'], 7629504) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['iter'], accessed_by=DictGetItemGuardAccessor(iter) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['iter'], 139859477898160) # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['range'], accessed_by=DictGetItemGuardAccessor(range) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['range'], 7576320) # for i in range(n_axes): # diffusers/src/diffusers/models/embeddings.py:628 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['slice'], accessed_by=DictGetItemGuardAccessor(slice) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['slice'], 7571168) # if isinstance(idx, slice): # nn/modules/container.py:331 in __getitem__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['super'], accessed_by=DictGetItemGuardAccessor(super) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['super'], 7562400) # return super().__getattr__(name) # diffusers/src/diffusers/models/modeling_utils.py:151 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['hasattr'], accessed_by=DictGetItemGuardAccessor(hasattr) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['hasattr'], 139859477897600) # is_in_config = 
"_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['enumerate'], accessed_by=DictGetItemGuardAccessor(enumerate) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['enumerate'], 7452256) # for index_block, block in enumerate(self.transformer_blocks): # diffusers/src/diffusers/models/transformers/transformer_flux.py:471 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__builtins_dict___4']['isinstance'], accessed_by=DictGetItemGuardAccessor(isinstance) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__builtins_dict___4']['isinstance'], 139859477898000) # if isinstance(pos, int): # diffusers/src/diffusers/models/embeddings.py:547 in get_1d_rotary_pos_embed V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_peft_dot_tuners_dot_tuners_utils'], accessed_by=DictGetItemGuardAccessor(__import_peft_dot_tuners_dot_tuners_utils) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_peft_dot_tuners_dot_tuners_utils'], 139846567629904) # return bool(self.merged_adapters) # peft/tuners/tuners_utils.py:455 in merged V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_peft_dot_tuners_dot_tuners_utils'].BaseTunerLayer, accessed_by=GetAttrGuardAccessor(BaseTunerLayer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_peft_dot_tuners_dot_tuners_utils'].BaseTunerLayer, 244527504) # from peft.tuners.tuners_utils import BaseTunerLayer # diffusers/src/diffusers/utils/peft_utils.py:113 in scale_lora_layers V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_attention) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention'], 139846585526608) # if len(args) > 0 or kwargs.get("scale", None) is not None: # diffusers/src/diffusers/models/attention.py:1162 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_embeddings) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'], 139846586042224) # t_emb = get_timestep_embedding( # diffusers/src/diffusers/models/embeddings.py:696 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].math, accessed_by=GetAttrGuardAccessor(math) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: 
___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].math, 139859475307632) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].math.log, accessed_by=GetAttrGuardAccessor(log) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].math.log, 139859474302592) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch, accessed_by=GetAttrGuardAccessor(torch) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch, 139859475513072) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['torch'] # ids = torch.cat((txt_ids, img_ids), dim=0) # diffusers/src/diffusers/models/transformers/transformer_flux.py:468 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['__import_diffusers_dot_models_dot_normalization'].torch # if self.weight.dtype in [torch.float16, torch.bfloat16]: # diffusers/src/diffusers/models/normalization.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['__import_diffusers_dot_models_dot_attention_processor'].torch # query = torch.cat([encoder_hidden_states_query_proj, query], dim=2) # diffusers/src/diffusers/models/attention_processor.py:1755 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.cat, accessed_by=GetAttrGuardAccessor(cat) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.cat, 139859471507920) # emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # diffusers/src/diffusers/models/embeddings.py:69 in get_timestep_embedding V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.cos, accessed_by=GetAttrGuardAccessor(cos) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.cos, 139859471509120) # emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # diffusers/src/diffusers/models/embeddings.py:69 in get_timestep_embedding V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.exp, 
accessed_by=GetAttrGuardAccessor(exp) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.exp, 139859471510480) # emb = torch.exp(exponent) # diffusers/src/diffusers/models/embeddings.py:62 in get_timestep_embedding V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.sin, accessed_by=GetAttrGuardAccessor(sin) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.sin, 139856572546480) # emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) # diffusers/src/diffusers/models/embeddings.py:69 in get_timestep_embedding V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.chunk, accessed_by=GetAttrGuardAccessor(chunk) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.chunk, 139859471508080) # scale, shift = torch.chunk(emb, 2, dim=1) # diffusers/src/diffusers/models/normalization.py:305 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.outer, accessed_by=GetAttrGuardAccessor(outer) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.outer, 139856572571056) # freqs = torch.outer(t, freqs) # type: ignore # [S, D/2] # diffusers/src/diffusers/models/embeddings.py:552 in get_1d_rotary_pos_embed V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.rsqrt, accessed_by=GetAttrGuardAccessor(rsqrt) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.rsqrt, 139859471462208) # hidden_states = hidden_states * torch.rsqrt(variance + self.eps) # diffusers/src/diffusers/models/normalization.py:428 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.stack, accessed_by=GetAttrGuardAccessor(stack) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.stack, 139859471467760) # x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3) # diffusers/src/diffusers/models/embeddings.py:595 in apply_rotary_emb V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.arange, accessed_by=GetAttrGuardAccessor(arange) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.arange, 139859471382304) # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in 
get_timestep_embedding V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.float16, accessed_by=GetAttrGuardAccessor(float16) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.float16 == torch.float16 # if self.weight.dtype in [torch.float16, torch.bfloat16]: # diffusers/src/diffusers/models/normalization.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.float32, accessed_by=GetAttrGuardAccessor(float32) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.float32 == torch.float32 # start=0, end=half_dim, dtype=torch.float32, device=timesteps.device # diffusers/src/diffusers/models/embeddings.py:58 in get_timestep_embedding V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.float64, accessed_by=GetAttrGuardAccessor(float64) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.float64 == torch.float64 # freqs_dtype = torch.float32 if is_mps else torch.float64 # diffusers/src/diffusers/models/embeddings.py:627 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.bfloat16, accessed_by=GetAttrGuardAccessor(bfloat16) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].torch.bfloat16 == torch.bfloat16 # if self.weight.dtype in [torch.float16, torch.bfloat16]: # diffusers/src/diffusers/models/normalization.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].torch.from_numpy, accessed_by=GetAttrGuardAccessor(from_numpy) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].torch.from_numpy, 139859471373712) # t = torch.from_numpy(pos).to(freqs.device) # type: ignore # [S] # diffusers/src/diffusers/models/embeddings.py:551 in get_1d_rotary_pos_embed V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb, accessed_by=GetAttrGuardAccessor(apply_rotary_emb) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__code__, 139855833426912) # from .embeddings import apply_rotary_emb # diffusers/src/diffusers/models/attention_processor.py:1760 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] 
[0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[0], 7629952) # if use_real: # diffusers/src/diffusers/models/embeddings.py:586 in apply_rotary_emb V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[1], accessed_by=GetItemGuardAccessor(1) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].apply_rotary_emb.__defaults__[1] == -1 # if use_real_unbind_dim == -1: # diffusers/src/diffusers/models/embeddings.py:592 in apply_rotary_emb V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding, accessed_by=GetAttrGuardAccessor(get_timestep_embedding) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__code__, 139855833348912) # t_emb = get_timestep_embedding( # diffusers/src/diffusers/models/embeddings.py:696 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__defaults__[3], accessed_by=GetItemGuardAccessor(3) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_timestep_embedding.__defaults__[3] == 10000 # exponent = -math.log(max_period) * torch.arange( # diffusers/src/diffusers/models/embeddings.py:57 in get_timestep_embedding V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed, accessed_by=GetAttrGuardAccessor(get_1d_rotary_pos_embed) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__code__, accessed_by=GetAttrGuardAccessor(__code__) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__code__, 139855833425856) # cos, sin = get_1d_rotary_pos_embed( # 
diffusers/src/diffusers/models/embeddings.py:629 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed, accessed_by=FuncDefaultsGuardAccessor V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[0], accessed_by=GetItemGuardAccessor(0) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[0] == 10000.0 # theta = theta * ntk_factor # diffusers/src/diffusers/models/embeddings.py:549 in get_1d_rotary_pos_embed V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[2], accessed_by=GetItemGuardAccessor(2) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[2] == 1.0 # freqs = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=freqs_dtype)[: (dim // 2)] / dim)) / linear_factor # [D/2] # diffusers/src/diffusers/models/embeddings.py:550 in get_1d_rotary_pos_embed V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[3], accessed_by=GetItemGuardAccessor(3) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- EQUALS_MATCH: G['__import_diffusers_dot_models_dot_embeddings'].get_1d_rotary_pos_embed.__defaults__[3] == 1.0 # theta = theta * ntk_factor # diffusers/src/diffusers/models/embeddings.py:549 in get_1d_rotary_pos_embed V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'], 139856042419520) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F, accessed_by=GetAttrGuardAccessor(F) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F, 139856042421440) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_activations'].F # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_dropout'].F # return F.dropout(input, 
self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_activation'].F # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_normalization'].F # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_attention_processor'].F # hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) # diffusers/src/diffusers/models/attention_processor.py:1765 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.gelu, accessed_by=GetAttrGuardAccessor(gelu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.gelu, 139856049958000) # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.silu, accessed_by=GetAttrGuardAccessor(silu) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.silu, 139856039794000) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.linear, accessed_by=GetAttrGuardAccessor(linear) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.linear, 139856049959216) # return F.linear(input, self.weight, self.bias) # nn/modules/linear.py:125 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.dropout, accessed_by=GetAttrGuardAccessor(dropout) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.dropout, 139856039777904) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.layer_norm, accessed_by=GetAttrGuardAccessor(layer_norm) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.layer_norm, 139856039795440) # return F.layer_norm( # nn/modules/normalization.py:217 in forward V0828 05:05:29.845303 
1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_linear'].F.scaled_dot_product_attention, accessed_by=GetAttrGuardAccessor(scaled_dot_product_attention) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_linear'].F.scaled_dot_product_attention, 139856049962576) # hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) # diffusers/src/diffusers/models/attention_processor.py:1765 in __call__ V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_module) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_module'], 139856045629904) # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_hooks, accessed_by=GetAttrGuardAccessor(_global_forward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_hooks, accessed_by=GetAttrGuardAccessor(_global_backward_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_pre_hooks, accessed_by=GetAttrGuardAccessor(_global_forward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_forward_pre_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_pre_hooks, accessed_by=GetAttrGuardAccessor(_global_backward_pre_hooks) V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- DICT_LENGTH: not G['__import_torch_dot_nn_dot_modules_dot_module']._global_backward_pre_hooks # hidden_states = self.x_embedder(hidden_states) # diffusers/src/diffusers/models/transformers/transformer_flux.py:442 in forward V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_activations'], 
accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_activations)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_activations'], 139855999915008) # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_activations'].F, accessed_by=GetAttrGuardAccessor(F)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_activations'].F # return F.gelu(gate, approximate=self.approximate) # diffusers/src/diffusers/models/activations.py:83 in gelu
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_dropout'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_dropout)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_dropout'], 139856038787168) # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_dropout'].F, accessed_by=GetAttrGuardAccessor(F)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_dropout'].F # return F.dropout(input, self.p, self.training, self.inplace) # nn/modules/dropout.py:70 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_normalization'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_normalization)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_normalization'], 139846585727152) # variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) # diffusers/src/diffusers/models/normalization.py:427 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_normalization'].torch, accessed_by=GetAttrGuardAccessor(torch)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['__import_diffusers_dot_models_dot_normalization'].torch # if self.weight.dtype in [torch.float16, torch.bfloat16]: # diffusers/src/diffusers/models/normalization.py:432 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_container'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_container)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_container'], 139856039033488) # return iter(self._modules.values()) # nn/modules/container.py:356 in __iter__
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_container'].operator, accessed_by=GetAttrGuardAccessor(operator)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_container'].operator, 139859476171088) # idx = operator.index(idx) # nn/modules/container.py:314 in _get_abs_string_index
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_container'].operator.index, accessed_by=GetAttrGuardAccessor(index)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_container'].operator.index, 139859476211968) # idx = operator.index(idx) # nn/modules/container.py:314 in _get_abs_string_index
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_modeling_utils'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_modeling_utils)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_modeling_utils'], 139855997135104) # is_in_config = "_internal_dict" in self.__dict__ and hasattr(self.__dict__["_internal_dict"], name) # diffusers/src/diffusers/models/modeling_utils.py:142 in __getattr__
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_activation'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_activation)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_activation'], 139856042420880) # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_activation'].F, accessed_by=GetAttrGuardAccessor(F)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_activation'].F # return F.silu(input, inplace=self.inplace) # nn/modules/activation.py:432 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_normalization'], accessed_by=DictGetItemGuardAccessor(__import_torch_dot_nn_dot_modules_dot_normalization)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_torch_dot_nn_dot_modules_dot_normalization'], 139856038867568) # return F.layer_norm( # nn/modules/normalization.py:217 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_torch_dot_nn_dot_modules_dot_normalization'].F, accessed_by=GetAttrGuardAccessor(F)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_torch_dot_nn_dot_modules_dot_normalization'].F # return F.layer_norm( # nn/modules/normalization.py:217 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'], accessed_by=DictGetItemGuardAccessor(__import_diffusers_dot_models_dot_attention_processor)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention_processor'], 139855999915568) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].inspect, accessed_by=GetAttrGuardAccessor(inspect)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention_processor'].inspect, 139859475305312) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].inspect.signature, accessed_by=GetAttrGuardAccessor(signature)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].inspect.signature.__code__, accessed_by=GetAttrGuardAccessor(__code__)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | | | +- ID_MATCH: ___check_obj_id(G['__import_diffusers_dot_models_dot_attention_processor'].inspect.signature.__code__, 139859474846800) # attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys()) # diffusers/src/diffusers/models/attention_processor.py:479 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].F, accessed_by=GetAttrGuardAccessor(F)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_torch_dot_nn_dot_modules_dot_linear'].F is G['__import_diffusers_dot_models_dot_attention_processor'].F # hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) # diffusers/src/diffusers/models/attention_processor.py:1765 in __call__
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- GuardManager: source=G['__import_diffusers_dot_models_dot_attention_processor'].torch, accessed_by=GetAttrGuardAccessor(torch)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['__import_diffusers_dot_models_dot_attention_processor'].torch # query = torch.cat([encoder_hidden_states_query_proj, query], dim=2) # diffusers/src/diffusers/models/attention_processor.py:1755 in __call__
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | +- GuardManager: source=G['torch'], accessed_by=DictGetItemGuardAccessor(torch)
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards] | | | +- OBJECT_ALIASING: G['__import_diffusers_dot_models_dot_embeddings'].torch is G['torch'] # ids = torch.cat((txt_ids, img_ids), dim=0) # diffusers/src/diffusers/models/transformers/transformer_flux.py:468 in forward
V0828 05:05:29.845303 1882310 torch/_dynamo/guards.py:2263] [0/2] [__guards]